2024-11-20 19:24:55,624 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9
2024-11-20 19:24:55,635 main DEBUG Took 0.009161 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-20 19:24:55,635 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-20 19:24:55,636 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-20 19:24:55,636 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-20 19:24:55,637 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 19:24:55,644 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-20 19:24:55,655 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 19:24:55,656 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 19:24:55,656 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 19:24:55,657 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 19:24:55,657 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 19:24:55,657 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 19:24:55,658 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 19:24:55,658 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 19:24:55,659 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 19:24:55,659 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 19:24:55,660 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 19:24:55,660 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 19:24:55,661 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 19:24:55,661 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 19:24:55,661 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 19:24:55,662 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 19:24:55,662 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 19:24:55,662 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 19:24:55,663 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 19:24:55,663 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 19:24:55,663 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 19:24:55,664 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 19:24:55,664 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 19:24:55,664 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 19:24:55,665 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 19:24:55,665 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-20 19:24:55,666 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 19:24:55,667 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-20 19:24:55,669 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-20 19:24:55,670 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-11-20 19:24:55,671 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-20 19:24:55,671 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-20 19:24:55,678 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-20 19:24:55,681 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-20 19:24:55,682 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-20 19:24:55,683 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-20 19:24:55,683 main DEBUG createAppenders(={Console})
2024-11-20 19:24:55,684 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized
2024-11-20 19:24:55,684 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9
2024-11-20 19:24:55,684 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK.
2024-11-20 19:24:55,685 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-20 19:24:55,685 main DEBUG OutputStream closed
2024-11-20 19:24:55,685 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-20 19:24:55,685 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-20 19:24:55,686 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK
2024-11-20 19:24:55,758 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-20 19:24:55,761 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-20 19:24:55,762 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-20 19:24:55,763 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-20 19:24:55,764 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-20 19:24:55,764 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-20 19:24:55,764 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-20 19:24:55,765 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-20 19:24:55,765 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-20 19:24:55,765 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-20 19:24:55,766 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-20 19:24:55,766 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-20 19:24:55,766 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-20 19:24:55,767 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-20 19:24:55,767 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-20 19:24:55,767 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-20 19:24:55,767 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-20 19:24:55,768 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-20 19:24:55,770 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-20 19:24:55,771 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null
2024-11-20 19:24:55,771 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-20 19:24:55,771 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK.
2024-11-20T19:24:55,980 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72
2024-11-20 19:24:55,983 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-20 19:24:55,983 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-20T19:24:55,991 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithBasicPolicy timeout: 13 mins
2024-11-20T19:24:56,012 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-20T19:24:56,016 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/cluster_db2e18e3-a485-48e5-5213-a6c18f012833, deleteOnExit=true
2024-11-20T19:24:56,016 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS
2024-11-20T19:24:56,017 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/test.cache.data in system properties and HBase conf
2024-11-20T19:24:56,018 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/hadoop.tmp.dir in system properties and HBase conf
2024-11-20T19:24:56,019 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/hadoop.log.dir in system properties and HBase conf
2024-11-20T19:24:56,020 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-20T19:24:56,021 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-20T19:24:56,021 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF
2024-11-20T19:24:56,105 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-11-20T19:24:56,193 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-20T19:24:56,196 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-20T19:24:56,197 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-20T19:24:56,197 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-20T19:24:56,198 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-20T19:24:56,198 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-20T19:24:56,199 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-20T19:24:56,199 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-20T19:24:56,200 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-20T19:24:56,200 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-20T19:24:56,200 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/nfs.dump.dir in system properties and HBase conf
2024-11-20T19:24:56,201 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/java.io.tmpdir in system properties and HBase conf
2024-11-20T19:24:56,201 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-20T19:24:56,201 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-20T19:24:56,202 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-20T19:24:57,278 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-11-20T19:24:57,382 INFO [Time-limited test {}] log.Log(170): Logging initialized @2422ms to org.eclipse.jetty.util.log.Slf4jLog
2024-11-20T19:24:57,463 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T19:24:57,551 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T19:24:57,586 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T19:24:57,587 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T19:24:57,589 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-20T19:24:57,609 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T19:24:57,614 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/hadoop.log.dir/,AVAILABLE}
2024-11-20T19:24:57,615 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-20T19:24:57,861 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/java.io.tmpdir/jetty-localhost-42839-hadoop-hdfs-3_4_1-tests_jar-_-any-9771872979612063172/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-20T19:24:57,869 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:42839}
2024-11-20T19:24:57,870 INFO [Time-limited test {}] server.Server(415): Started @2910ms
2024-11-20T19:24:58,415 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T19:24:58,428 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T19:24:58,430 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T19:24:58,431 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T19:24:58,434 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-20T19:24:58,435 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@134e7cc5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/hadoop.log.dir/,AVAILABLE}
2024-11-20T19:24:58,436 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ca71a25{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-20T19:24:58,572 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@10ba49e9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/java.io.tmpdir/jetty-localhost-44131-hadoop-hdfs-3_4_1-tests_jar-_-any-618143021852634619/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T19:24:58,574 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@661c2e9c{HTTP/1.1, (http/1.1)}{localhost:44131}
2024-11-20T19:24:58,574 INFO [Time-limited test {}] server.Server(415): Started @3614ms
2024-11-20T19:24:58,635 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-20T19:24:59,646 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/cluster_db2e18e3-a485-48e5-5213-a6c18f012833/dfs/data/data1/current/BP-1052772000-172.17.0.2-1732130696748/current, will proceed with Du for space computation calculation,
2024-11-20T19:24:59,646 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/cluster_db2e18e3-a485-48e5-5213-a6c18f012833/dfs/data/data2/current/BP-1052772000-172.17.0.2-1732130696748/current, will proceed with Du for space computation calculation,
2024-11-20T19:24:59,682 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-20T19:24:59,736 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa914e63649554ef9 with lease ID 0x773a26034dc0f95d: Processing first storage report for DS-ee48f466-408f-4130-85de-c2ab2688b70a from datanode DatanodeRegistration(127.0.0.1:36171, datanodeUuid=62e061be-1081-4234-973e-e9d14a5f767e, infoPort=41251, infoSecurePort=0, ipcPort=40845, storageInfo=lv=-57;cid=testClusterID;nsid=2037531190;c=1732130696748)
2024-11-20T19:24:59,737 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa914e63649554ef9 with lease ID 0x773a26034dc0f95d: from storage DS-ee48f466-408f-4130-85de-c2ab2688b70a node DatanodeRegistration(127.0.0.1:36171, datanodeUuid=62e061be-1081-4234-973e-e9d14a5f767e, infoPort=41251, infoSecurePort=0, ipcPort=40845, storageInfo=lv=-57;cid=testClusterID;nsid=2037531190;c=1732130696748), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-20T19:24:59,738 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa914e63649554ef9 with lease ID 0x773a26034dc0f95d: Processing first storage report for DS-5f42dff7-713d-4eaf-9e9f-42d90d7d7498 from datanode DatanodeRegistration(127.0.0.1:36171, datanodeUuid=62e061be-1081-4234-973e-e9d14a5f767e, infoPort=41251, infoSecurePort=0, ipcPort=40845, storageInfo=lv=-57;cid=testClusterID;nsid=2037531190;c=1732130696748)
2024-11-20T19:24:59,738 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa914e63649554ef9 with lease ID 0x773a26034dc0f95d: from storage DS-5f42dff7-713d-4eaf-9e9f-42d90d7d7498 node DatanodeRegistration(127.0.0.1:36171, datanodeUuid=62e061be-1081-4234-973e-e9d14a5f767e, infoPort=41251, infoSecurePort=0, ipcPort=40845, storageInfo=lv=-57;cid=testClusterID;nsid=2037531190;c=1732130696748), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-20T19:24:59,778 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72
2024-11-20T19:24:59,853 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/cluster_db2e18e3-a485-48e5-5213-a6c18f012833/zookeeper_0, clientPort=49985, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/cluster_db2e18e3-a485-48e5-5213-a6c18f012833/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/cluster_db2e18e3-a485-48e5-5213-a6c18f012833/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-20T19:24:59,862 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=49985
2024-11-20T19:24:59,871 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T19:24:59,873 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T19:25:00,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741825_1001 (size=7)
2024-11-20T19:25:00,487 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203 with version=8
2024-11-20T19:25:00,487 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/hbase-staging
2024-11-20T19:25:00,621 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-11-20T19:25:00,978 INFO [Time-limited test {}] client.ConnectionUtils(129): master/db9c3a6c6492:0 server-side Connection retries=45
2024-11-20T19:25:00,993 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T19:25:00,994 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-20T19:25:00,994 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-20T19:25:00,994 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T19:25:00,995 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-20T19:25:01,115 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-20T19:25:01,166 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-11-20T19:25:01,174 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-11-20T19:25:01,177 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-20T19:25:01,199 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 52508 (auto-detected)
2024-11-20T19:25:01,199 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-11-20T19:25:01,216 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:46833
2024-11-20T19:25:01,223 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T19:25:01,224 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T19:25:01,235 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:46833 connecting to ZooKeeper ensemble=127.0.0.1:49985
2024-11-20T19:25:01,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:468330x0, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-20T19:25:01,338 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46833-0x1015afe9cb30000 connected
2024-11-20T19:25:01,407 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-20T19:25:01,410 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-20T19:25:01,413 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-20T19:25:01,417 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46833
2024-11-20T19:25:01,418 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46833
2024-11-20T19:25:01,418 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46833
2024-11-20T19:25:01,419 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46833
2024-11-20T19:25:01,419 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46833
2024-11-20T19:25:01,426 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203, hbase.cluster.distributed=false
2024-11-20T19:25:01,497 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/db9c3a6c6492:0 server-side Connection retries=45
2024-11-20T19:25:01,497 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T19:25:01,497 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-20T19:25:01,497 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-20T19:25:01,498 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T19:25:01,498 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-20T19:25:01,500 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-20T19:25:01,503 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-20T19:25:01,504 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41229
2024-11-20T19:25:01,506 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-20T19:25:01,511 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-20T19:25:01,513 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T19:25:01,516 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T19:25:01,520 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:41229 connecting to ZooKeeper ensemble=127.0.0.1:49985
2024-11-20T19:25:01,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:412290x0, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-20T19:25:01,531 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:412290x0, quorum=127.0.0.1:49985, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-20T19:25:01,531 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41229-0x1015afe9cb30001 connected
2024-11-20T19:25:01,533 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41229-0x1015afe9cb30001, quorum=127.0.0.1:49985, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-20T19:25:01,534 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41229-0x1015afe9cb30001, quorum=127.0.0.1:49985, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-20T19:25:01,534 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41229
2024-11-20T19:25:01,535 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41229
2024-11-20T19:25:01,535 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41229
2024-11-20T19:25:01,536 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41229
2024-11-20T19:25:01,536 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41229
2024-11-20T19:25:01,538 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/db9c3a6c6492,46833,1732130700613
2024-11-20T19:25:01,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-20T19:25:01,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41229-0x1015afe9cb30001, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-20T19:25:01,550 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/db9c3a6c6492,46833,1732130700613
2024-11-20T19:25:01,556 DEBUG [M:0;db9c3a6c6492:46833 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;db9c3a6c6492:46833
2024-11-20T19:25:01,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41229-0x1015afe9cb30001, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-20T19:25:01,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-20T19:25:01,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41229-0x1015afe9cb30001, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T19:25:01,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T19:25:01,573 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-20T19:25:01,574 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-20T19:25:01,574 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/db9c3a6c6492,46833,1732130700613 from backup master directory
2024-11-20T19:25:01,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41229-0x1015afe9cb30001, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-20T19:25:01,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/db9c3a6c6492,46833,1732130700613
2024-11-20T19:25:01,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-20T19:25:01,582 WARN [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-20T19:25:01,582 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=db9c3a6c6492,46833,1732130700613
2024-11-20T19:25:01,584 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-11-20T19:25:01,586 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-11-20T19:25:01,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741826_1002 (size=42)
2024-11-20T19:25:01,675 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/hbase.id with ID: f721b149-9118-459d-8112-3ff1cc720c86
2024-11-20T19:25:01,737 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T19:25:01,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T19:25:01,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41229-0x1015afe9cb30001, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T19:25:01,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741827_1003 (size=196)
2024-11-20T19:25:01,838 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-20T19:25:01,840 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-20T19:25:01,862 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
    at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at java.lang.Class.forName0(Native Method) ~[?:?]
    at java.lang.Class.forName(Class.java:375) ~[?:?]
    at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T19:25:01,870 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-20T19:25:01,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741828_1004 (size=1189)
2024-11-20T19:25:01,922 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/MasterData/data/master/store
2024-11-20T19:25:01,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741829_1005 (size=34)
2024-11-20T19:25:02,348 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-11-20T19:25:02,349 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-20T19:25:02,351 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-20T19:25:02,351 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T19:25:02,351 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T19:25:02,352 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-20T19:25:02,352 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T19:25:02,352 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T19:25:02,353 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-11-20T19:25:02,355 WARN [master/db9c3a6c6492:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/MasterData/data/master/store/.initializing
2024-11-20T19:25:02,355 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/MasterData/WALs/db9c3a6c6492,46833,1732130700613
2024-11-20T19:25:02,361 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-11-20T19:25:02,376 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9c3a6c6492%2C46833%2C1732130700613, suffix=, logDir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/MasterData/WALs/db9c3a6c6492,46833,1732130700613, archiveDir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/MasterData/oldWALs, maxLogs=10
2024-11-20T19:25:02,405 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/MasterData/WALs/db9c3a6c6492,46833,1732130700613/db9c3a6c6492%2C46833%2C1732130700613.1732130702382, exclude list is [], retry=0
2024-11-20T19:25:02,420 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36171,DS-ee48f466-408f-4130-85de-c2ab2688b70a,DISK]
2024-11-20T19:25:02,423 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf.
2024-11-20T19:25:02,455 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/MasterData/WALs/db9c3a6c6492,46833,1732130700613/db9c3a6c6492%2C46833%2C1732130700613.1732130702382 2024-11-20T19:25:02,455 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41251:41251)] 2024-11-20T19:25:02,456 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T19:25:02,456 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:25:02,459 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T19:25:02,460 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T19:25:02,496 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T19:25:02,519 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T19:25:02,522 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:02,525 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T19:25:02,526 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T19:25:02,530 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T19:25:02,530 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:02,532 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:25:02,532 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T19:25:02,536 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T19:25:02,536 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:02,537 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:25:02,537 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T19:25:02,540 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T19:25:02,540 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:02,541 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:25:02,545 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T19:25:02,546 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T19:25:02,555 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T19:25:02,560 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T19:25:02,565 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T19:25:02,567 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61636037, jitterRate=-0.08155147731304169}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T19:25:02,573 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-20T19:25:02,574 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T19:25:02,598 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a40dda4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:02,625 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
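The FlushLargeStoresPolicy line above spells out its own fallback: with no hbase.hregion.percolumnfamilyflush.size.lower.bound set, it divides the region's memstore flush size by the number of column families. A quick arithmetic check against the numbers in this log (flushSize=134217728 from the MasterRegionFlusherAndCompactor constructor, and the four master:store families info, proc, rs and state); the class name below is made up purely for illustration.

// Arithmetic check only; not HBase code.
public class FlushLowerBoundCheck {
  public static void main(String[] args) {
    long flushSize = 134_217_728L;           // flushSize logged above (128 MB)
    int families = 4;                        // master:store families: info, proc, rs, state
    long lowerBound = flushSize / families;  // 33_554_432 bytes
    System.out.println(lowerBound + " bytes = " + (lowerBound >> 20) + " MB");
    // Matches FlushLargeStoresPolicy{flushSizeLowerBound=33554432} and the "(32.0 M)" above.
  }
}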
2024-11-20T19:25:02,636 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T19:25:02,636 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T19:25:02,638 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T19:25:02,640 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-11-20T19:25:02,644 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 3 msec 2024-11-20T19:25:02,644 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T19:25:02,671 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T19:25:02,684 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T19:25:02,688 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-20T19:25:02,691 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T19:25:02,692 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T19:25:02,696 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-20T19:25:02,698 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T19:25:02,701 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T19:25:02,705 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-20T19:25:02,706 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T19:25:02,713 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T19:25:02,722 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T19:25:02,730 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T19:25:02,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T19:25:02,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41229-0x1015afe9cb30001, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T19:25:02,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:25:02,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41229-0x1015afe9cb30001, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:25:02,739 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=db9c3a6c6492,46833,1732130700613, sessionid=0x1015afe9cb30000, setting cluster-up flag (Was=false) 2024-11-20T19:25:02,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:25:02,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41229-0x1015afe9cb30001, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:25:02,788 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T19:25:02,790 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db9c3a6c6492,46833,1732130700613 2024-11-20T19:25:02,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41229-0x1015afe9cb30001, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:25:02,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:25:02,830 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T19:25:02,832 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db9c3a6c6492,46833,1732130700613 2024-11-20T19:25:02,855 DEBUG [RS:0;db9c3a6c6492:41229 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;db9c3a6c6492:41229 2024-11-20T19:25:02,857 INFO 
[RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer(1008): ClusterId : f721b149-9118-459d-8112-3ff1cc720c86 2024-11-20T19:25:02,860 DEBUG [RS:0;db9c3a6c6492:41229 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T19:25:02,874 DEBUG [RS:0;db9c3a6c6492:41229 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T19:25:02,874 DEBUG [RS:0;db9c3a6c6492:41229 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T19:25:02,882 DEBUG [RS:0;db9c3a6c6492:41229 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T19:25:02,882 DEBUG [RS:0;db9c3a6c6492:41229 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69278ad2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:02,885 DEBUG [RS:0;db9c3a6c6492:41229 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47968602, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db9c3a6c6492/172.17.0.2:0 2024-11-20T19:25:02,889 INFO [RS:0;db9c3a6c6492:41229 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-20T19:25:02,889 INFO [RS:0;db9c3a6c6492:41229 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-20T19:25:02,889 DEBUG [RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-11-20T19:25:02,892 INFO [RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer(3073): reportForDuty to master=db9c3a6c6492,46833,1732130700613 with isa=db9c3a6c6492/172.17.0.2:41229, startcode=1732130701496 2024-11-20T19:25:02,904 DEBUG [RS:0;db9c3a6c6492:41229 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T19:25:02,915 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-11-20T19:25:02,922 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-20T19:25:02,925 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
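The StochasticLoadBalancer(294) entry lists the tuning values it loaded (maxSteps=1000000, stepsPerRegion=800, maxRunningTime=30000) together with its cost functions. The sketch below shows how such values would typically be supplied, assuming the hbase.master.balancer.stochastic.* keys documented for this balancer are the ones in play; the log only shows the resulting values, so the key names are an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Assumed property names; values copied from the balancer line above.
public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.master.balancer.stochastic.maxSteps", 1_000_000L);     // maxSteps above
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);       // stepsPerRegion above
    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);  // maxRunningTime above (ms)
    System.out.println(conf.get("hbase.master.balancer.stochastic.maxSteps"));
  }
}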
2024-11-20T19:25:02,930 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: db9c3a6c6492,46833,1732130700613 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T19:25:02,933 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/db9c3a6c6492:0, corePoolSize=5, maxPoolSize=5 2024-11-20T19:25:02,933 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/db9c3a6c6492:0, corePoolSize=5, maxPoolSize=5 2024-11-20T19:25:02,934 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/db9c3a6c6492:0, corePoolSize=5, maxPoolSize=5 2024-11-20T19:25:02,934 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/db9c3a6c6492:0, corePoolSize=5, maxPoolSize=5 2024-11-20T19:25:02,934 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/db9c3a6c6492:0, corePoolSize=10, maxPoolSize=10 2024-11-20T19:25:02,934 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/db9c3a6c6492:0, corePoolSize=1, maxPoolSize=1 2024-11-20T19:25:02,935 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/db9c3a6c6492:0, corePoolSize=2, maxPoolSize=2 2024-11-20T19:25:02,935 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/db9c3a6c6492:0, corePoolSize=1, maxPoolSize=1 2024-11-20T19:25:02,937 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732130732937 2024-11-20T19:25:02,939 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T19:25:02,940 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T19:25:02,940 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-20T19:25:02,940 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-20T19:25:02,943 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56833, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T19:25:02,944 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T19:25:02,944 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T19:25:02,944 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T19:25:02,944 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:02,945 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T19:25:02,945 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T19:25:02,945 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T19:25:02,951 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T19:25:02,952 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T19:25:02,953 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T19:25:02,955 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46833 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:25:02,959 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-11-20T19:25:02,959 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-11-20T19:25:02,961 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/db9c3a6c6492:0:becomeActiveMaster-HFileCleaner.large.0-1732130702960,5,FailOnTimeoutGroup]
2024-11-20T19:25:02,961 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/db9c3a6c6492:0:becomeActiveMaster-HFileCleaner.small.0-1732130702961,5,FailOnTimeoutGroup]
2024-11-20T19:25:02,961 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-20T19:25:02,962 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-11-20T19:25:02,963 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-11-20T19:25:02,963 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-11-20T19:25:02,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741831_1007 (size=1039)
2024-11-20T19:25:02,983 DEBUG [RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer(3097): Master is not running yet
2024-11-20T19:25:02,984 WARN [RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying.
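The last two entries above show the region server backing off because the master RPC endpoint is not serving yet (ServerNotRunningYetException) and retrying reportForDuty after 100 ms. The snippet below only illustrates that generic retry-and-sleep pattern; it is not HBase's implementation, and the Master interface used here is a hypothetical stand-in.

import java.util.concurrent.TimeUnit;

public class ReportForDutyRetry {
  interface Master { void reportForDuty() throws Exception; }  // hypothetical stand-in

  static void registerWithMaster(Master master) throws InterruptedException {
    long sleepMs = 100;                        // matches "sleeping 100 ms and then retrying"
    while (true) {
      try {
        master.reportForDuty();                // fails while the master is still starting up
        return;                                // registered
      } catch (Exception serverNotRunningYet) {
        TimeUnit.MILLISECONDS.sleep(sleepMs);  // back off, then retry
      }
    }
  }
}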
2024-11-20T19:25:03,085 INFO [RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer(3073): reportForDuty to master=db9c3a6c6492,46833,1732130700613 with isa=db9c3a6c6492/172.17.0.2:41229, startcode=1732130701496 2024-11-20T19:25:03,087 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46833 {}] master.ServerManager(332): Checking decommissioned status of RegionServer db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:03,089 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46833 {}] master.ServerManager(486): Registering regionserver=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:03,096 DEBUG [RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203 2024-11-20T19:25:03,097 DEBUG [RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:40371 2024-11-20T19:25:03,097 DEBUG [RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-20T19:25:03,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T19:25:03,114 DEBUG [RS:0;db9c3a6c6492:41229 {}] zookeeper.ZKUtil(111): regionserver:41229-0x1015afe9cb30001, quorum=127.0.0.1:49985, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:03,114 WARN [RS:0;db9c3a6c6492:41229 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T19:25:03,114 INFO [RS:0;db9c3a6c6492:41229 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-20T19:25:03,115 DEBUG [RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/WALs/db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:03,116 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [db9c3a6c6492,41229,1732130701496] 2024-11-20T19:25:03,127 DEBUG [RS:0;db9c3a6c6492:41229 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-20T19:25:03,138 INFO [RS:0;db9c3a6c6492:41229 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T19:25:03,155 INFO [RS:0;db9c3a6c6492:41229 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T19:25:03,158 INFO [RS:0;db9c3a6c6492:41229 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T19:25:03,158 INFO [RS:0;db9c3a6c6492:41229 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-20T19:25:03,159 INFO [RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-20T19:25:03,166 INFO [RS:0;db9c3a6c6492:41229 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-20T19:25:03,166 DEBUG [RS:0;db9c3a6c6492:41229 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/db9c3a6c6492:0, corePoolSize=1, maxPoolSize=1 2024-11-20T19:25:03,166 DEBUG [RS:0;db9c3a6c6492:41229 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/db9c3a6c6492:0, corePoolSize=1, maxPoolSize=1 2024-11-20T19:25:03,166 DEBUG [RS:0;db9c3a6c6492:41229 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0, corePoolSize=1, maxPoolSize=1 2024-11-20T19:25:03,166 DEBUG [RS:0;db9c3a6c6492:41229 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/db9c3a6c6492:0, corePoolSize=1, maxPoolSize=1 2024-11-20T19:25:03,167 DEBUG [RS:0;db9c3a6c6492:41229 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/db9c3a6c6492:0, corePoolSize=1, maxPoolSize=1 2024-11-20T19:25:03,167 DEBUG [RS:0;db9c3a6c6492:41229 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/db9c3a6c6492:0, corePoolSize=2, maxPoolSize=2 2024-11-20T19:25:03,167 DEBUG [RS:0;db9c3a6c6492:41229 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0, corePoolSize=1, maxPoolSize=1 2024-11-20T19:25:03,167 DEBUG [RS:0;db9c3a6c6492:41229 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/db9c3a6c6492:0, corePoolSize=1, maxPoolSize=1 2024-11-20T19:25:03,167 DEBUG [RS:0;db9c3a6c6492:41229 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/db9c3a6c6492:0, corePoolSize=1, maxPoolSize=1 2024-11-20T19:25:03,167 DEBUG [RS:0;db9c3a6c6492:41229 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/db9c3a6c6492:0, corePoolSize=1, maxPoolSize=1 2024-11-20T19:25:03,167 DEBUG [RS:0;db9c3a6c6492:41229 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/db9c3a6c6492:0, corePoolSize=1, maxPoolSize=1 2024-11-20T19:25:03,168 DEBUG [RS:0;db9c3a6c6492:41229 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/db9c3a6c6492:0, corePoolSize=3, maxPoolSize=3 2024-11-20T19:25:03,168 DEBUG [RS:0;db9c3a6c6492:41229 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0, corePoolSize=3, maxPoolSize=3 2024-11-20T19:25:03,169 INFO [RS:0;db9c3a6c6492:41229 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T19:25:03,169 INFO [RS:0;db9c3a6c6492:41229 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T19:25:03,169 INFO [RS:0;db9c3a6c6492:41229 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-20T19:25:03,169 INFO [RS:0;db9c3a6c6492:41229 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T19:25:03,169 INFO [RS:0;db9c3a6c6492:41229 {}] hbase.ChoreService(168): Chore ScheduledChore name=db9c3a6c6492,41229,1732130701496-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T19:25:03,195 INFO [RS:0;db9c3a6c6492:41229 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T19:25:03,196 INFO [RS:0;db9c3a6c6492:41229 {}] hbase.ChoreService(168): Chore ScheduledChore name=db9c3a6c6492,41229,1732130701496-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T19:25:03,218 INFO [RS:0;db9c3a6c6492:41229 {}] regionserver.Replication(204): db9c3a6c6492,41229,1732130701496 started 2024-11-20T19:25:03,218 INFO [RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer(1767): Serving as db9c3a6c6492,41229,1732130701496, RpcServer on db9c3a6c6492/172.17.0.2:41229, sessionid=0x1015afe9cb30001 2024-11-20T19:25:03,219 DEBUG [RS:0;db9c3a6c6492:41229 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T19:25:03,220 DEBUG [RS:0;db9c3a6c6492:41229 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:03,220 DEBUG [RS:0;db9c3a6c6492:41229 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db9c3a6c6492,41229,1732130701496' 2024-11-20T19:25:03,220 DEBUG [RS:0;db9c3a6c6492:41229 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T19:25:03,221 DEBUG [RS:0;db9c3a6c6492:41229 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T19:25:03,222 DEBUG [RS:0;db9c3a6c6492:41229 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T19:25:03,222 DEBUG [RS:0;db9c3a6c6492:41229 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T19:25:03,222 DEBUG [RS:0;db9c3a6c6492:41229 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:03,222 DEBUG [RS:0;db9c3a6c6492:41229 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db9c3a6c6492,41229,1732130701496' 2024-11-20T19:25:03,222 DEBUG [RS:0;db9c3a6c6492:41229 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T19:25:03,223 DEBUG [RS:0;db9c3a6c6492:41229 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T19:25:03,224 DEBUG [RS:0;db9c3a6c6492:41229 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T19:25:03,224 INFO [RS:0;db9c3a6c6492:41229 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T19:25:03,224 INFO [RS:0;db9c3a6c6492:41229 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
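At this point the region server is serving (sessionid=0x1015afe9cb30001) and the cluster is reachable through the ZooKeeper ensemble shown throughout this log (quorum 127.0.0.1, client port 49985, base znode /hbase). A minimal client sketch against that test cluster follows; it assumes the hbase-client dependency on the classpath and only works while this test environment is up.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MiniClusterClient {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");            // from quorum=127.0.0.1:49985 above
    conf.set("hbase.zookeeper.property.clientPort", "49985");
    conf.set("zookeeper.znode.parent", "/hbase");               // baseZNode=/hbase above
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // ClusterId f721b149-9118-459d-8112-3ff1cc720c86 was logged when RS:0 started.
      System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
    }
  }
}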
2024-11-20T19:25:03,329 INFO [RS:0;db9c3a6c6492:41229 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-20T19:25:03,333 INFO [RS:0;db9c3a6c6492:41229 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9c3a6c6492%2C41229%2C1732130701496, suffix=, logDir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/WALs/db9c3a6c6492,41229,1732130701496, archiveDir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/oldWALs, maxLogs=32 2024-11-20T19:25:03,350 DEBUG [RS:0;db9c3a6c6492:41229 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/WALs/db9c3a6c6492,41229,1732130701496/db9c3a6c6492%2C41229%2C1732130701496.1732130703336, exclude list is [], retry=0 2024-11-20T19:25:03,356 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36171,DS-ee48f466-408f-4130-85de-c2ab2688b70a,DISK] 2024-11-20T19:25:03,359 INFO [RS:0;db9c3a6c6492:41229 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/WALs/db9c3a6c6492,41229,1732130701496/db9c3a6c6492%2C41229%2C1732130701496.1732130703336 2024-11-20T19:25:03,360 DEBUG [RS:0;db9c3a6c6492:41229 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41251:41251)] 2024-11-20T19:25:03,367 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-20T19:25:03,368 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203 2024-11-20T19:25:03,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741833_1009 (size=32) 2024-11-20T19:25:03,784 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:25:03,791 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T19:25:03,795 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T19:25:03,796 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:03,797 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T19:25:03,797 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T19:25:03,800 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T19:25:03,800 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:03,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T19:25:03,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T19:25:03,804 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T19:25:03,804 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:03,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T19:25:03,806 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/meta/1588230740 2024-11-20T19:25:03,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/meta/1588230740 2024-11-20T19:25:03,810 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
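The hbase:meta descriptor printed by FSTableDescriptors and HRegion above fixes each family's settings (for 'info': VERSIONS 3, ROW_INDEX_V1 data block encoding, ROWCOL bloom filter, IN_MEMORY, 8 KB blocks). As a sketch, the same family settings expressed through the public client API; the table name 'demo' is a placeholder, and this is not the internal bootstrap code path that the log is executing.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptor {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))       // placeholder table name
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                     // VERSIONS => '3'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
            .setInMemory(true)                                     // IN_MEMORY => 'true'
            .setBlocksize(8192)                                    // BLOCKSIZE => '8192 B (8KB)'
            .build())
        .build();
  }
}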
2024-11-20T19:25:03,813 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-20T19:25:03,817 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T19:25:03,818 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72942833, jitterRate=0.08693291246891022}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T19:25:03,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-20T19:25:03,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-20T19:25:03,822 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-20T19:25:03,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-20T19:25:03,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T19:25:03,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T19:25:03,824 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-20T19:25:03,824 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-20T19:25:03,826 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-20T19:25:03,826 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-20T19:25:03,831 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T19:25:03,839 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T19:25:03,841 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T19:25:03,992 DEBUG [db9c3a6c6492:46833 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T19:25:03,999 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:04,005 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db9c3a6c6492,41229,1732130701496, state=OPENING 2024-11-20T19:25:04,013 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T19:25:04,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41229-0x1015afe9cb30001, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:25:04,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:25:04,022 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T19:25:04,023 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T19:25:04,025 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=db9c3a6c6492,41229,1732130701496}] 2024-11-20T19:25:04,211 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:04,212 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T19:25:04,215 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60190, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T19:25:04,226 INFO [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-20T19:25:04,226 INFO [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-20T19:25:04,227 INFO [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-20T19:25:04,231 INFO [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9c3a6c6492%2C41229%2C1732130701496.meta, suffix=.meta, logDir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/WALs/db9c3a6c6492,41229,1732130701496, archiveDir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/oldWALs, maxLogs=32 2024-11-20T19:25:04,246 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/WALs/db9c3a6c6492,41229,1732130701496/db9c3a6c6492%2C41229%2C1732130701496.meta.1732130704233.meta, exclude list is [], retry=0 2024-11-20T19:25:04,250 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36171,DS-ee48f466-408f-4130-85de-c2ab2688b70a,DISK] 2024-11-20T19:25:04,253 INFO [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/WALs/db9c3a6c6492,41229,1732130701496/db9c3a6c6492%2C41229%2C1732130701496.meta.1732130704233.meta 2024-11-20T19:25:04,253 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with 
pipeline: [(127.0.0.1/127.0.0.1:41251:41251)] 2024-11-20T19:25:04,253 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T19:25:04,255 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T19:25:04,307 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T19:25:04,311 INFO [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-20T19:25:04,315 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T19:25:04,315 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:25:04,316 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-20T19:25:04,316 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-20T19:25:04,319 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T19:25:04,321 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T19:25:04,321 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:04,322 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T19:25:04,322 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T19:25:04,323 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T19:25:04,324 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:04,325 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T19:25:04,325 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T19:25:04,326 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T19:25:04,326 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:04,327 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T19:25:04,329 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/meta/1588230740 2024-11-20T19:25:04,331 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/meta/1588230740 2024-11-20T19:25:04,334 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T19:25:04,337 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-20T19:25:04,338 INFO [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69282840, jitterRate=0.03239476680755615}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T19:25:04,339 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-20T19:25:04,346 INFO [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732130704205 2024-11-20T19:25:04,356 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T19:25:04,357 INFO [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-20T19:25:04,357 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:04,359 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db9c3a6c6492,41229,1732130701496, state=OPEN 2024-11-20T19:25:04,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T19:25:04,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41229-0x1015afe9cb30001, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T19:25:04,449 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T19:25:04,449 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T19:25:04,453 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T19:25:04,453 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=db9c3a6c6492,41229,1732130701496 in 424 msec 2024-11-20T19:25:04,460 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T19:25:04,460 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure 
table=hbase:meta, region=1588230740, ASSIGN in 623 msec 2024-11-20T19:25:04,467 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.5980 sec 2024-11-20T19:25:04,467 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732130704467, completionTime=-1 2024-11-20T19:25:04,467 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T19:25:04,468 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-20T19:25:04,502 DEBUG [hconnection-0x429eb088-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:04,504 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60198, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:04,514 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-11-20T19:25:04,514 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732130764514 2024-11-20T19:25:04,514 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732130824514 2024-11-20T19:25:04,514 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 46 msec 2024-11-20T19:25:04,548 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9c3a6c6492,46833,1732130700613-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T19:25:04,549 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9c3a6c6492,46833,1732130700613-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T19:25:04,549 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9c3a6c6492,46833,1732130700613-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T19:25:04,550 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-db9c3a6c6492:46833, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T19:25:04,550 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T19:25:04,555 DEBUG [master/db9c3a6c6492:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-20T19:25:04,558 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
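The entries above cover the assignment of hbase:meta: the region is opened on db9c3a6c6492,41229,1732130701496 and its location is published under /hbase/meta-region-server. For orientation only, a minimal client-side sketch that resolves the same location through the public API could look like the following; the quorum host and port are taken from this run's minicluster, and the class name is illustrative, not part of the test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Values taken from this log; a real deployment would use its own quorum.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "49985");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Resolves the same server the master wrote to /hbase/meta-region-server,
      // e.g. db9c3a6c6492,41229,1732130701496 in the run captured here.
      HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
      System.out.println("hbase:meta is served by " + loc.getServerName());
    }
  }
}
```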
2024-11-20T19:25:04,560 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T19:25:04,566 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-20T19:25:04,568 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T19:25:04,569 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:04,571 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T19:25:04,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741835_1011 (size=358) 2024-11-20T19:25:04,990 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 26eb6e9aec5a60a946cc3400b187b0a4, NAME => 'hbase:namespace,,1732130704559.26eb6e9aec5a60a946cc3400b187b0a4.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203 2024-11-20T19:25:05,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741836_1012 (size=42) 2024-11-20T19:25:05,402 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732130704559.26eb6e9aec5a60a946cc3400b187b0a4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:25:05,403 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 26eb6e9aec5a60a946cc3400b187b0a4, disabling compactions & flushes 2024-11-20T19:25:05,403 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732130704559.26eb6e9aec5a60a946cc3400b187b0a4. 2024-11-20T19:25:05,403 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732130704559.26eb6e9aec5a60a946cc3400b187b0a4. 2024-11-20T19:25:05,403 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732130704559.26eb6e9aec5a60a946cc3400b187b0a4. 
after waiting 0 ms 2024-11-20T19:25:05,403 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732130704559.26eb6e9aec5a60a946cc3400b187b0a4. 2024-11-20T19:25:05,403 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1732130704559.26eb6e9aec5a60a946cc3400b187b0a4. 2024-11-20T19:25:05,403 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 26eb6e9aec5a60a946cc3400b187b0a4: 2024-11-20T19:25:05,405 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T19:25:05,413 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1732130704559.26eb6e9aec5a60a946cc3400b187b0a4.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1732130705406"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732130705406"}]},"ts":"1732130705406"} 2024-11-20T19:25:05,459 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T19:25:05,466 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T19:25:05,469 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130705466"}]},"ts":"1732130705466"} 2024-11-20T19:25:05,473 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-20T19:25:05,523 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=26eb6e9aec5a60a946cc3400b187b0a4, ASSIGN}] 2024-11-20T19:25:05,525 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=26eb6e9aec5a60a946cc3400b187b0a4, ASSIGN 2024-11-20T19:25:05,527 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=26eb6e9aec5a60a946cc3400b187b0a4, ASSIGN; state=OFFLINE, location=db9c3a6c6492,41229,1732130701496; forceNewPlan=false, retain=false 2024-11-20T19:25:05,678 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=26eb6e9aec5a60a946cc3400b187b0a4, regionState=OPENING, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:05,685 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 26eb6e9aec5a60a946cc3400b187b0a4, server=db9c3a6c6492,41229,1732130701496}] 2024-11-20T19:25:05,840 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:05,846 INFO [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1732130704559.26eb6e9aec5a60a946cc3400b187b0a4. 2024-11-20T19:25:05,847 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 26eb6e9aec5a60a946cc3400b187b0a4, NAME => 'hbase:namespace,,1732130704559.26eb6e9aec5a60a946cc3400b187b0a4.', STARTKEY => '', ENDKEY => ''} 2024-11-20T19:25:05,847 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 26eb6e9aec5a60a946cc3400b187b0a4 2024-11-20T19:25:05,847 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732130704559.26eb6e9aec5a60a946cc3400b187b0a4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:25:05,848 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 26eb6e9aec5a60a946cc3400b187b0a4 2024-11-20T19:25:05,848 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 26eb6e9aec5a60a946cc3400b187b0a4 2024-11-20T19:25:05,850 INFO [StoreOpener-26eb6e9aec5a60a946cc3400b187b0a4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 26eb6e9aec5a60a946cc3400b187b0a4 2024-11-20T19:25:05,853 INFO [StoreOpener-26eb6e9aec5a60a946cc3400b187b0a4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 26eb6e9aec5a60a946cc3400b187b0a4 columnFamilyName info 2024-11-20T19:25:05,853 DEBUG [StoreOpener-26eb6e9aec5a60a946cc3400b187b0a4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:05,854 INFO [StoreOpener-26eb6e9aec5a60a946cc3400b187b0a4-1 {}] regionserver.HStore(327): Store=26eb6e9aec5a60a946cc3400b187b0a4/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:25:05,855 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/namespace/26eb6e9aec5a60a946cc3400b187b0a4 2024-11-20T19:25:05,856 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/namespace/26eb6e9aec5a60a946cc3400b187b0a4 2024-11-20T19:25:05,859 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 26eb6e9aec5a60a946cc3400b187b0a4 2024-11-20T19:25:05,863 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/namespace/26eb6e9aec5a60a946cc3400b187b0a4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T19:25:05,864 INFO [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 26eb6e9aec5a60a946cc3400b187b0a4; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71784223, jitterRate=0.06966827809810638}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T19:25:05,865 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 26eb6e9aec5a60a946cc3400b187b0a4: 2024-11-20T19:25:05,868 INFO [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1732130704559.26eb6e9aec5a60a946cc3400b187b0a4., pid=6, masterSystemTime=1732130705840 2024-11-20T19:25:05,871 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1732130704559.26eb6e9aec5a60a946cc3400b187b0a4. 2024-11-20T19:25:05,871 INFO [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1732130704559.26eb6e9aec5a60a946cc3400b187b0a4. 
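Once the hbase:namespace region is open (the block above), the descriptor that was logged with the create request at 19:25:04,560 can be read back through the Admin API. A short sketch, assuming an already-open client Connection such as the ones created later in this log:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;

public final class NamespaceTableDescriptorCheck {
  // 'conn' is an open org.apache.hadoop.hbase.client.Connection (see the connection sketches nearby).
  static void printNamespaceTableDescriptor(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      TableDescriptor td = admin.getDescriptor(TableName.valueOf("hbase:namespace"));
      // Should echo the attributes from the create request above:
      // NAME => 'info', VERSIONS => '10', IN_MEMORY => 'true', BLOCKSIZE => '8192', ...
      System.out.println(td);
    }
  }
}
```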
2024-11-20T19:25:05,872 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=26eb6e9aec5a60a946cc3400b187b0a4, regionState=OPEN, openSeqNum=2, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:05,881 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-20T19:25:05,883 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 26eb6e9aec5a60a946cc3400b187b0a4, server=db9c3a6c6492,41229,1732130701496 in 192 msec 2024-11-20T19:25:05,886 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-20T19:25:05,886 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=26eb6e9aec5a60a946cc3400b187b0a4, ASSIGN in 358 msec 2024-11-20T19:25:05,888 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T19:25:05,888 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130705888"}]},"ts":"1732130705888"} 2024-11-20T19:25:05,891 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-20T19:25:05,899 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T19:25:05,902 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.3380 sec 2024-11-20T19:25:05,970 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-20T19:25:05,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-20T19:25:05,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41229-0x1015afe9cb30001, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:25:05,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:25:06,013 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-20T19:25:06,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-20T19:25:06,051 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 41 msec 2024-11-20T19:25:06,059 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-20T19:25:06,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-20T19:25:06,092 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 33 msec 2024-11-20T19:25:06,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-20T19:25:06,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-20T19:25:06,138 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.556sec 2024-11-20T19:25:06,140 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T19:25:06,142 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T19:25:06,143 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T19:25:06,144 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-20T19:25:06,144 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T19:25:06,145 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9c3a6c6492,46833,1732130700613-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T19:25:06,145 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9c3a6c6492,46833,1732130700613-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T19:25:06,152 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-20T19:25:06,153 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T19:25:06,153 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9c3a6c6492,46833,1732130700613-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
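The two CreateNamespaceProcedure runs above set up the built-in default and hbase namespaces during master initialization. The same operations are available to clients through the Admin API; a hedged sketch follows, with the user namespace name made up for illustration.

```java
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public final class NamespaceExample {
  static void listAndCreate(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      // After the procedures above finish, this lists at least "default" and "hbase".
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println("namespace: " + ns.getName());
      }
      // Creating a user namespace goes through the same CreateNamespaceProcedure on the master.
      admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
    }
  }
}
```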
2024-11-20T19:25:06,156 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x38630296 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@76523d14 2024-11-20T19:25:06,157 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-20T19:25:06,165 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75444e35, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:06,169 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-20T19:25:06,169 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-20T19:25:06,179 DEBUG [hconnection-0x25802b45-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:06,186 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60206, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:06,199 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=db9c3a6c6492,46833,1732130700613 2024-11-20T19:25:06,211 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=196, ProcessCount=11, AvailableMemoryMB=4774 2024-11-20T19:25:06,223 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T19:25:06,225 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58940, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T19:25:06,232 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
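The WARN at 19:25:06,157 flags that this client resolved the cluster through the deprecated ZKConnectionRegistry. A minimal connection sketch for the same cluster is shown below; the registry-switching property mentioned in the comment is an assumption to verify against the book section linked in the log, not something this log demonstrates.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class ClientConnectionSketch {
  static Connection open() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "49985");
    // The deprecation warning above points at the RPC-based connection registry as the
    // replacement; the exact property and value to opt in (e.g. "hbase.client.registry.impl")
    // should be checked against https://hbase.apache.org/book.html#client.rpcconnectionregistry.
    return ConnectionFactory.createConnection(conf);
  }
}
```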
2024-11-20T19:25:06,235 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T19:25:06,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T19:25:06,239 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T19:25:06,240 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:06,240 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-11-20T19:25:06,242 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T19:25:06,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T19:25:06,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741837_1013 (size=960) 2024-11-20T19:25:06,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T19:25:06,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T19:25:06,657 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS 
=> 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203 2024-11-20T19:25:06,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741838_1014 (size=53) 2024-11-20T19:25:06,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T19:25:07,069 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:25:07,069 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 6fb4967d0e6203ca72c498496394ce45, disabling compactions & flushes 2024-11-20T19:25:07,069 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:07,069 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:07,069 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. after waiting 0 ms 2024-11-20T19:25:07,069 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:07,069 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
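The create request at 19:25:06,235 carries the full TestAcidGuarantees schema: families A, B and C with VERSIONS => '1' and 64 KB blocks, a BASIC compacting memstore, and the 131072-byte MEMSTORE_FLUSHSIZE that triggered the TableDescriptorChecker warning earlier. A sketch of how that descriptor maps onto the 2.x builder API; this is not the test's own code, only an illustration of the same settings.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateTestAcidGuaranteesSketch {
  static void create(Admin admin) throws Exception {
    TableDescriptorBuilder table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        // TABLE_ATTRIBUTES => METADATA => 'hbase.hregion.compacting.memstore.type' => 'BASIC'
        .setValue("hbase.hregion.compacting.memstore.type", "BASIC")
        // The small flush size (131072 bytes) behind the "too small" WARN earlier in the log.
        .setMemStoreFlushSize(128 * 1024);
    for (String family : new String[] {"A", "B", "C"}) {
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1)                 // VERSIONS => '1'
          .setBlocksize(64 * 1024)           // BLOCKSIZE => '65536 B (64KB)'
          .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
          .build());
    }
    // Lands in the same CreateTableProcedure flow shown above (pid=9 in this run).
    admin.createTable(table.build());
  }
}
```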
2024-11-20T19:25:07,069 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:07,071 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T19:25:07,071 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732130707071"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732130707071"}]},"ts":"1732130707071"} 2024-11-20T19:25:07,074 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T19:25:07,076 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T19:25:07,076 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130707076"}]},"ts":"1732130707076"} 2024-11-20T19:25:07,079 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T19:25:07,097 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6fb4967d0e6203ca72c498496394ce45, ASSIGN}] 2024-11-20T19:25:07,099 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6fb4967d0e6203ca72c498496394ce45, ASSIGN 2024-11-20T19:25:07,101 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=6fb4967d0e6203ca72c498496394ce45, ASSIGN; state=OFFLINE, location=db9c3a6c6492,41229,1732130701496; forceNewPlan=false, retain=false 2024-11-20T19:25:07,251 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=6fb4967d0e6203ca72c498496394ce45, regionState=OPENING, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:07,255 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496}] 2024-11-20T19:25:07,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T19:25:07,410 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:07,419 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:07,420 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} 2024-11-20T19:25:07,421 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:07,421 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:25:07,421 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:07,421 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:07,424 INFO [StoreOpener-6fb4967d0e6203ca72c498496394ce45-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:07,428 INFO [StoreOpener-6fb4967d0e6203ca72c498496394ce45-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:25:07,428 INFO [StoreOpener-6fb4967d0e6203ca72c498496394ce45-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6fb4967d0e6203ca72c498496394ce45 columnFamilyName A 2024-11-20T19:25:07,428 DEBUG [StoreOpener-6fb4967d0e6203ca72c498496394ce45-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:07,430 INFO [StoreOpener-6fb4967d0e6203ca72c498496394ce45-1 {}] regionserver.HStore(327): Store=6fb4967d0e6203ca72c498496394ce45/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:25:07,430 INFO [StoreOpener-6fb4967d0e6203ca72c498496394ce45-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:07,433 INFO [StoreOpener-6fb4967d0e6203ca72c498496394ce45-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:25:07,433 INFO [StoreOpener-6fb4967d0e6203ca72c498496394ce45-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6fb4967d0e6203ca72c498496394ce45 columnFamilyName B 2024-11-20T19:25:07,433 DEBUG [StoreOpener-6fb4967d0e6203ca72c498496394ce45-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:07,434 INFO [StoreOpener-6fb4967d0e6203ca72c498496394ce45-1 {}] regionserver.HStore(327): Store=6fb4967d0e6203ca72c498496394ce45/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:25:07,435 INFO [StoreOpener-6fb4967d0e6203ca72c498496394ce45-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:07,437 INFO [StoreOpener-6fb4967d0e6203ca72c498496394ce45-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:25:07,438 INFO [StoreOpener-6fb4967d0e6203ca72c498496394ce45-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6fb4967d0e6203ca72c498496394ce45 columnFamilyName C 2024-11-20T19:25:07,438 DEBUG [StoreOpener-6fb4967d0e6203ca72c498496394ce45-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:07,439 INFO [StoreOpener-6fb4967d0e6203ca72c498496394ce45-1 {}] regionserver.HStore(327): Store=6fb4967d0e6203ca72c498496394ce45/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:25:07,440 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:07,442 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:07,443 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:07,446 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T19:25:07,450 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:07,454 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T19:25:07,456 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 6fb4967d0e6203ca72c498496394ce45; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65624578, jitterRate=-0.022117584943771362}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T19:25:07,457 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:07,459 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., pid=11, masterSystemTime=1732130707409 2024-11-20T19:25:07,463 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:07,463 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
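The StoreOpener entries above show each of A, B and C coming up with a CompactingMemStore (compactor=BASIC, in-memory flush size threshold 2.00 MB) as a consequence of the table-level hbase.hregion.compacting.memstore.type attribute. For completeness, the same policy can also be pinned per column family; a small hedged sketch, assuming the 2.x ColumnFamilyDescriptorBuilder setter:

```java
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class InMemoryCompactionSketch {
  // Per-family equivalent of the BASIC policy the log shows being applied table-wide.
  static ColumnFamilyDescriptor basicCompactingFamily(String name) {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
        .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
        .build();
  }
}
```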
2024-11-20T19:25:07,464 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=6fb4967d0e6203ca72c498496394ce45, regionState=OPEN, openSeqNum=2, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:07,471 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-20T19:25:07,473 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 in 212 msec 2024-11-20T19:25:07,476 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-20T19:25:07,476 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6fb4967d0e6203ca72c498496394ce45, ASSIGN in 374 msec 2024-11-20T19:25:07,478 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T19:25:07,478 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130707478"}]},"ts":"1732130707478"} 2024-11-20T19:25:07,482 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T19:25:07,491 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T19:25:07,495 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2550 sec 2024-11-20T19:25:08,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T19:25:08,368 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-11-20T19:25:08,376 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e67f019 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6fcb5f29 2024-11-20T19:25:08,423 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fdf5682, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:08,426 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:08,429 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41952, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:08,432 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T19:25:08,435 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43652, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T19:25:08,443 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5095ba91 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1f2091cc 2024-11-20T19:25:08,456 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79d38d10, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:08,458 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x12885408 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9bd0964 2024-11-20T19:25:08,473 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72b32f98, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:08,475 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x62c43377 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@18cb251d 2024-11-20T19:25:08,481 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@736f1673, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:08,483 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x04977266 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@45b55c24 2024-11-20T19:25:08,489 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bbb5d8a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:08,491 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5a8f4734 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@e52b42a 2024-11-20T19:25:08,497 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f34ff67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:08,500 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x10c964e8 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9ed28bb 2024-11-20T19:25:08,506 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b5cad1a, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:08,507 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72e97e4b to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12a1285d 2024-11-20T19:25:08,514 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c3b736e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:08,516 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x527c6d40 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@353bc462 2024-11-20T19:25:08,522 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@767a8485, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:08,525 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c8de680 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@47fe2fa7 2024-11-20T19:25:08,531 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6502d571, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:08,538 DEBUG [hconnection-0x5fa3171e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:08,542 DEBUG [hconnection-0x76214a8b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:08,542 DEBUG [hconnection-0x3a4b465a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:08,542 DEBUG [hconnection-0x2e827a6f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:08,543 DEBUG [hconnection-0x13693b96-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:08,544 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41966, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:08,544 DEBUG [hconnection-0x2a80f7f2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:08,544 DEBUG [hconnection-0x19e0dde4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:08,544 DEBUG [hconnection-0x1eb647c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 
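The burst of ReadOnlyZKClient connects and metaLookup pools above appears to correspond to the test tool opening one connection per worker thread before the run starts. The property the run exercises is single-row atomicity across the three families; a hedged sketch of that kind of write and read is shown below, with row key, qualifier and value invented for illustration rather than taken from the test.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class AtomicRowWriteSketch {
  static void writeAndCheck(Connection conn) throws Exception {
    byte[] row = Bytes.toBytes("test_row_0");
    byte[] qual = Bytes.toBytes("col0");
    byte[] value = Bytes.toBytes(42L);
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // A single Put touching A, B and C is applied atomically for this row,
      // which is the guarantee writers and getters in this kind of test rely on.
      Put put = new Put(row);
      for (String family : new String[] {"A", "B", "C"}) {
        put.addColumn(Bytes.toBytes(family), qual, value);
      }
      table.put(put);

      // A reader should never observe the three families out of sync for one row.
      Result result = table.get(new Get(row));
      byte[] a = result.getValue(Bytes.toBytes("A"), qual);
      byte[] b = result.getValue(Bytes.toBytes("B"), qual);
      byte[] c = result.getValue(Bytes.toBytes("C"), qual);
      assert Bytes.equals(a, b) && Bytes.equals(b, c);
    }
  }
}
```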
2024-11-20T19:25:08,545 DEBUG [hconnection-0x12cddefe-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:08,545 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:08,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-11-20T19:25:08,551 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41976, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:08,551 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41992, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:08,552 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41978, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:08,552 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42008, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:08,552 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:08,553 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42036, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:08,553 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42042, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:08,554 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42022, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:08,554 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:08,555 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42054, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:08,556 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:08,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T19:25:08,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:08,625 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6fb4967d0e6203ca72c498496394ce45 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:25:08,640 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=A 2024-11-20T19:25:08,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:08,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=B 2024-11-20T19:25:08,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:08,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=C 2024-11-20T19:25:08,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:08,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T19:25:08,721 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:08,723 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T19:25:08,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:08,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:08,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:08,725 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
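The pid=13 failure above is not a data error: FlushRegionCallable refuses to start a second flush while MemStoreFlusher.0 is still writing the region out ("NOT flushing ... as already flushing"), throws the IOException, and the result is reported back to the master, which simply re-dispatches the procedure (visible again at 19:25:08,894 and later). Below is a simplified, self-contained sketch of that guard-and-retry pattern; it is not the actual HBase source, and the attempt count, sleep and flag are illustrative.

```java
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

/** Simplified sketch of "fail if already flushing, let the coordinator retry". */
public class FlushRetrySketch {

  static final AtomicBoolean regionAlreadyFlushing = new AtomicBoolean(true);

  /** Stand-in for FlushRegionCallable: refuse to overlap with an in-progress flush. */
  static void flushRegionOnce(String regionName) throws IOException {
    if (regionAlreadyFlushing.get()) {
      // Mirrors "NOT flushing ... as already flushing" followed by the IOException above.
      throw new IOException("Unable to complete flush " + regionName);
    }
    // ... a real flush would snapshot the memstore and write store files here ...
  }

  public static void main(String[] args) throws Exception {
    String region = "6fb4967d0e6203ca72c498496394ce45";
    // Stand-in for the master re-dispatching pid=13 until the region server succeeds.
    for (int attempt = 1; ; attempt++) {
      try {
        flushRegionOnce(region);
        System.out.println("flush completed on attempt " + attempt);
        break;
      } catch (IOException e) {
        System.out.println("attempt " + attempt + " failed: " + e.getMessage());
        Thread.sleep(100L * attempt);       // back off before the next dispatch
        if (attempt == 3) {
          regionAlreadyFlushing.set(false); // the ongoing flush eventually finishes
        }
      }
    }
  }
}
```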
2024-11-20T19:25:08,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:08,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:08,766 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/0d816f012458428889902f64c2d797f5 is 50, key is test_row_0/A:col10/1732130708607/Put/seqid=0 2024-11-20T19:25:08,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:08,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42042 deadline: 1732130768763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:08,783 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:08,783 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:08,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130768769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:08,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130768770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:08,785 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:08,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42008 deadline: 1732130768775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:08,786 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:08,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130768782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:08,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741839_1015 (size=12001) 2024-11-20T19:25:08,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T19:25:08,893 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:08,894 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T19:25:08,897 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:08,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:08,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130768889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:08,902 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:08,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42042 deadline: 1732130768888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:08,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130768891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:08,903 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:08,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130768894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:08,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:08,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42008 deadline: 1732130768892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:08,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:08,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:08,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:08,907 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:08,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:08,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:09,064 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:09,065 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T19:25:09,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:09,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:09,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:09,088 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:09,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:09,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:09,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:09,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130769105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:09,109 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:09,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130769108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:09,109 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:09,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42042 deadline: 1732130769108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:09,111 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:09,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42008 deadline: 1732130769110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:09,112 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:09,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130769112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:09,132 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-20T19:25:09,133 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-20T19:25:09,134 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-11-20T19:25:09,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T19:25:09,211 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/0d816f012458428889902f64c2d797f5 2024-11-20T19:25:09,247 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:09,248 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T19:25:09,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:09,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:09,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
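The repeated RegionTooBusyException entries are back-pressure: once a region's memstore exceeds its blocking threshold (reported here as 512.0 K), new mutations are rejected until the flush catches up. The sketch below shows the configuration knobs that normally determine that threshold, i.e. the memstore flush size multiplied by the block multiplier; the specific values are illustrative and chosen only so that the product matches the 512 KB seen in this log, not the settings TestAcidGuarantees actually uses.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

/** Illustrative configuration of the memstore limits behind RegionTooBusyException. */
public class MemstoreBackPressureSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // A region flushes once its memstore reaches this size (example value: 128 KB;
    // production defaults are much larger).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);

    // Writes are blocked (RegionTooBusyException) once the memstore grows to
    // flush.size * block.multiplier -- with these example values, 512 KB,
    // matching the "Over memstore limit=512.0 K" entries above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking limit = " + blockingLimit + " bytes"); // 524288
  }
}
```

Because the exception is retriable, the client writers keep retrying, which is why the same connections (for example 172.17.0.2:42054) reappear above with increasing callIds (7, 9, 11, 13) and fresh deadlines while the flush is still in progress.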
2024-11-20T19:25:09,248 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:09,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:09,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:09,364 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/a0deb564ac854e7a838d058b63c00d9b is 50, key is test_row_0/B:col10/1732130708607/Put/seqid=0 2024-11-20T19:25:09,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741840_1016 (size=12001) 2024-11-20T19:25:09,406 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/a0deb564ac854e7a838d058b63c00d9b 2024-11-20T19:25:09,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:09,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130769412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:09,414 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:09,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:09,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130769415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:09,419 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T19:25:09,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:09,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:09,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:09,420 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:09,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:09,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:09,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42008 deadline: 1732130769421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:09,425 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:09,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42042 deadline: 1732130769420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:09,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:09,431 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:09,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130769417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:09,461 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/22df1258abbf4d8f817635f99e10d4ab is 50, key is test_row_0/C:col10/1732130708607/Put/seqid=0 2024-11-20T19:25:09,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741841_1017 (size=12001) 2024-11-20T19:25:09,583 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:09,585 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T19:25:09,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:09,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
as already flushing 2024-11-20T19:25:09,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:09,585 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:09,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:09,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:09,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T19:25:09,739 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:09,740 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T19:25:09,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:09,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:09,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:09,741 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:09,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:09,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:09,833 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T19:25:09,885 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/22df1258abbf4d8f817635f99e10d4ab 2024-11-20T19:25:09,895 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:09,896 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T19:25:09,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:09,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:09,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:09,897 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:09,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:09,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:09,910 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/0d816f012458428889902f64c2d797f5 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/0d816f012458428889902f64c2d797f5 2024-11-20T19:25:09,924 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:09,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130769922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:09,929 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:09,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130769924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:09,935 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:09,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42042 deadline: 1732130769933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:09,936 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:09,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42008 deadline: 1732130769933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:09,940 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:09,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130769936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:09,952 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/0d816f012458428889902f64c2d797f5, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T19:25:09,959 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/a0deb564ac854e7a838d058b63c00d9b as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/a0deb564ac854e7a838d058b63c00d9b 2024-11-20T19:25:10,006 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/a0deb564ac854e7a838d058b63c00d9b, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T19:25:10,016 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/22df1258abbf4d8f817635f99e10d4ab as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/22df1258abbf4d8f817635f99e10d4ab 2024-11-20T19:25:10,036 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/22df1258abbf4d8f817635f99e10d4ab, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T19:25:10,038 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 6fb4967d0e6203ca72c498496394ce45 in 1413ms, sequenceid=13, compaction requested=false 2024-11-20T19:25:10,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:10,051 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:10,052 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T19:25:10,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:10,053 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 6fb4967d0e6203ca72c498496394ce45 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T19:25:10,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=A 2024-11-20T19:25:10,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:10,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=B 2024-11-20T19:25:10,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:10,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=C 2024-11-20T19:25:10,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:10,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/4b2bb5d3999b49f8989f9be04f92cfb1 is 50, key is test_row_0/A:col10/1732130708761/Put/seqid=0 2024-11-20T19:25:10,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741842_1018 (size=12001) 2024-11-20T19:25:10,109 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/4b2bb5d3999b49f8989f9be04f92cfb1 2024-11-20T19:25:10,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/99649a47c7e7422b97db68b0c9ae66ec is 50, key is test_row_0/B:col10/1732130708761/Put/seqid=0 2024-11-20T19:25:10,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741843_1019 (size=12001) 2024-11-20T19:25:10,162 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/99649a47c7e7422b97db68b0c9ae66ec 2024-11-20T19:25:10,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/fedfcd10b5c0477383ba68355a96b65b is 50, key is test_row_0/C:col10/1732130708761/Put/seqid=0 2024-11-20T19:25:10,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741844_1020 (size=12001) 2024-11-20T19:25:10,230 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/fedfcd10b5c0477383ba68355a96b65b 2024-11-20T19:25:10,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/4b2bb5d3999b49f8989f9be04f92cfb1 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/4b2bb5d3999b49f8989f9be04f92cfb1 2024-11-20T19:25:10,259 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/4b2bb5d3999b49f8989f9be04f92cfb1, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T19:25:10,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/99649a47c7e7422b97db68b0c9ae66ec as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/99649a47c7e7422b97db68b0c9ae66ec 2024-11-20T19:25:10,277 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/99649a47c7e7422b97db68b0c9ae66ec, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T19:25:10,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/fedfcd10b5c0477383ba68355a96b65b as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/fedfcd10b5c0477383ba68355a96b65b 2024-11-20T19:25:10,303 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/fedfcd10b5c0477383ba68355a96b65b, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T19:25:10,305 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for 6fb4967d0e6203ca72c498496394ce45 in 253ms, sequenceid=37, compaction requested=false 2024-11-20T19:25:10,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:10,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:10,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-20T19:25:10,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-20T19:25:10,322 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-20T19:25:10,322 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7520 sec 2024-11-20T19:25:10,326 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 1.7770 sec 2024-11-20T19:25:10,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T19:25:10,669 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-11-20T19:25:10,672 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:10,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-11-20T19:25:10,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T19:25:10,676 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:10,680 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:10,680 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:10,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T19:25:10,835 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:10,836 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T19:25:10,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:10,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:10,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:10,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-20T19:25:10,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-11-20T19:25:10,845 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-11-20T19:25:10,845 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 159 msec 2024-11-20T19:25:10,850 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 174 msec 2024-11-20T19:25:10,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:10,971 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6fb4967d0e6203ca72c498496394ce45 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T19:25:10,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=A 2024-11-20T19:25:10,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:10,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=B 2024-11-20T19:25:10,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:10,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=C 2024-11-20T19:25:10,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:10,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T19:25:10,978 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-11-20T19:25:10,981 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:10,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-11-20T19:25:10,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 
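The flush cycles above (procIds 12, 14, 16) are client-requested table flushes that the master turns into FlushTableProcedure/FlushRegionProcedure pairs. A hedged sketch of how such a flush can be requested through the Admin API is shown below; the table name comes from the log, while the class name and connection configuration are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master reports the FlushTableProcedure finished,
      // which corresponds to the "Operation: FLUSH ... procId: N completed" lines above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}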
2024-11-20T19:25:10,989 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:10,991 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/1b5e0a8dd28244959843e0c12bf928de is 50, key is test_row_0/A:col10/1732130710961/Put/seqid=0 2024-11-20T19:25:10,993 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:10,993 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:11,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741845_1021 (size=16681) 2024-11-20T19:25:11,033 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/1b5e0a8dd28244959843e0c12bf928de 2024-11-20T19:25:11,059 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/4bfb23c97eea404ca818d658227a7da9 is 50, key is test_row_0/B:col10/1732130710961/Put/seqid=0 2024-11-20T19:25:11,069 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130771050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130771058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42042 deadline: 1732130771063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42008 deadline: 1732130771068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,075 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130771069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T19:25:11,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741846_1022 (size=12001) 2024-11-20T19:25:11,101 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/4bfb23c97eea404ca818d658227a7da9 2024-11-20T19:25:11,149 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,150 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/3b8457b0bdc249adbbb0ef7fc36c376b is 50, key is test_row_0/C:col10/1732130710961/Put/seqid=0 2024-11-20T19:25:11,150 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T19:25:11,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:11,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:11,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:11,151 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:11,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:11,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:11,163 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T19:25:11,164 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-20T19:25:11,166 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-20T19:25:11,166 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-11-20T19:25:11,168 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T19:25:11,168 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-20T19:25:11,169 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-20T19:25:11,169 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-20T19:25:11,170 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T19:25:11,170 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-20T19:25:11,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130771175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,189 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42008 deadline: 1732130771176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130771174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741847_1023 (size=12001) 2024-11-20T19:25:11,198 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42042 deadline: 1732130771174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,199 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/3b8457b0bdc249adbbb0ef7fc36c376b 2024-11-20T19:25:11,200 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130771186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/1b5e0a8dd28244959843e0c12bf928de as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/1b5e0a8dd28244959843e0c12bf928de 2024-11-20T19:25:11,259 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/1b5e0a8dd28244959843e0c12bf928de, entries=250, sequenceid=50, filesize=16.3 K 2024-11-20T19:25:11,261 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/4bfb23c97eea404ca818d658227a7da9 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/4bfb23c97eea404ca818d658227a7da9 2024-11-20T19:25:11,283 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/4bfb23c97eea404ca818d658227a7da9, entries=150, sequenceid=50, filesize=11.7 K 2024-11-20T19:25:11,285 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/3b8457b0bdc249adbbb0ef7fc36c376b as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/3b8457b0bdc249adbbb0ef7fc36c376b 2024-11-20T19:25:11,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T19:25:11,301 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/3b8457b0bdc249adbbb0ef7fc36c376b, entries=150, sequenceid=50, filesize=11.7 K 
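The repeated RegionTooBusyException warnings in this stretch come from HRegion.checkResources rejecting writes once the region's memstore passes its blocking limit (512.0 K here). In HBase that limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so the test is presumably running with a deliberately small flush size. The sketch below only illustrates the relevant knobs; the concrete values are assumptions chosen to reproduce a 512 KB limit, not the test's actual settings.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);  // flush at 128 KB (assumed test-sized value)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // block updates at 4x the flush size

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Writes are rejected above ~" + blockingLimit + " bytes");  // 524288 = 512 KB

    // The client treats RegionTooBusyException as retryable; these settings bound
    // how many times and with what base pause a put is retried before the call
    // fails back to the caller.
    conf.setInt("hbase.client.retries.number", 15);
    conf.setLong("hbase.client.pause", 100);  // base pause in ms
  }
}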
2024-11-20T19:25:11,304 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,306 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T19:25:11,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:11,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:11,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:11,307 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:11,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:11,309 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 6fb4967d0e6203ca72c498496394ce45 in 338ms, sequenceid=50, compaction requested=true 2024-11-20T19:25:11,309 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:11,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:11,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fb4967d0e6203ca72c498496394ce45:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:11,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:11,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fb4967d0e6203ca72c498496394ce45:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:11,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:11,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fb4967d0e6203ca72c498496394ce45:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:11,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:11,339 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:11,340 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:11,346 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:11,348 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 6fb4967d0e6203ca72c498496394ce45/B is initiating minor compaction (all files) 2024-11-20T19:25:11,348 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6fb4967d0e6203ca72c498496394ce45/B in TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:11,349 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/a0deb564ac854e7a838d058b63c00d9b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/99649a47c7e7422b97db68b0c9ae66ec, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/4bfb23c97eea404ca818d658227a7da9] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp, totalSize=35.2 K 2024-11-20T19:25:11,350 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40683 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:11,351 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 6fb4967d0e6203ca72c498496394ce45/A is initiating minor compaction (all files) 2024-11-20T19:25:11,351 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting a0deb564ac854e7a838d058b63c00d9b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732130708607 2024-11-20T19:25:11,351 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6fb4967d0e6203ca72c498496394ce45/A in TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
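The compaction lines above show the flush-driven pattern: once a store holds three HFiles (the default hbase.hstore.compactionThreshold), ExploringCompactionPolicy selects all of them for a minor compaction, and the files carry ROW bloom filters (bloomtype=ROW). Below is a hedged sketch of a table definition that yields the same file properties, plus an explicit compaction request; apart from the table name and the A/B/C family names taken from the log, everything else is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName name = TableName.valueOf("TestAcidGuarantees");
      TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(name);
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setBloomFilterType(BloomType.ROW)   // matches bloomtype=ROW in the compaction log
            .build());
      }
      if (!admin.tableExists(name)) {
        admin.createTable(table.build());
      }
      // Request a compaction explicitly instead of waiting for the
      // three-file threshold to trigger one.
      admin.compact(name);
    }
  }
}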
2024-11-20T19:25:11,351 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/0d816f012458428889902f64c2d797f5, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/4b2bb5d3999b49f8989f9be04f92cfb1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/1b5e0a8dd28244959843e0c12bf928de] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp, totalSize=39.7 K 2024-11-20T19:25:11,352 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 99649a47c7e7422b97db68b0c9ae66ec, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732130708761 2024-11-20T19:25:11,352 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0d816f012458428889902f64c2d797f5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732130708607 2024-11-20T19:25:11,354 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 4bfb23c97eea404ca818d658227a7da9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732130710955 2024-11-20T19:25:11,355 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b2bb5d3999b49f8989f9be04f92cfb1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732130708761 2024-11-20T19:25:11,356 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b5e0a8dd28244959843e0c12bf928de, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732130710955 2024-11-20T19:25:11,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:11,412 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6fb4967d0e6203ca72c498496394ce45 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:25:11,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=A 2024-11-20T19:25:11,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:11,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=B 2024-11-20T19:25:11,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:11,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=C 2024-11-20T19:25:11,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:11,434 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
6fb4967d0e6203ca72c498496394ce45#B#compaction#9 average throughput is 0.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:11,436 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/f1df051742d44e8db8ad75ddb12f151e is 50, key is test_row_0/B:col10/1732130710961/Put/seqid=0 2024-11-20T19:25:11,436 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fb4967d0e6203ca72c498496394ce45#A#compaction#10 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:11,437 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/7beb14e34462404a871e3436c9133f11 is 50, key is test_row_0/A:col10/1732130710961/Put/seqid=0 2024-11-20T19:25:11,449 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/c28a6f8404f54976abfff8e804970842 is 50, key is test_row_0/A:col10/1732130711409/Put/seqid=0 2024-11-20T19:25:11,462 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,463 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T19:25:11,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:11,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:11,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:11,464 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:11,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:11,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:11,492 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130771456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,495 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130771459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,506 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130771479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42008 deadline: 1732130771487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,510 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42042 deadline: 1732130771492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741849_1025 (size=12104) 2024-11-20T19:25:11,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741848_1024 (size=12104) 2024-11-20T19:25:11,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741850_1026 (size=14341) 2024-11-20T19:25:11,588 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/c28a6f8404f54976abfff8e804970842 2024-11-20T19:25:11,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T19:25:11,607 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/f1df051742d44e8db8ad75ddb12f151e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/f1df051742d44e8db8ad75ddb12f151e 2024-11-20T19:25:11,618 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130771597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,619 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,619 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T19:25:11,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:11,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:11,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:11,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,620 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:11,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130771598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:11,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:11,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130771612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42008 deadline: 1732130771612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,627 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42042 deadline: 1732130771614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,642 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/fc0e6e713249499283df9b901ee7c881 is 50, key is test_row_0/B:col10/1732130711409/Put/seqid=0 2024-11-20T19:25:11,656 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6fb4967d0e6203ca72c498496394ce45/B of 6fb4967d0e6203ca72c498496394ce45 into f1df051742d44e8db8ad75ddb12f151e(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:11,657 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:11,657 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., storeName=6fb4967d0e6203ca72c498496394ce45/B, priority=13, startTime=1732130711339; duration=0sec 2024-11-20T19:25:11,657 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:11,658 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fb4967d0e6203ca72c498496394ce45:B 2024-11-20T19:25:11,658 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:11,667 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:11,667 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 6fb4967d0e6203ca72c498496394ce45/C is initiating minor compaction (all files) 2024-11-20T19:25:11,667 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6fb4967d0e6203ca72c498496394ce45/C in TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:11,667 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/22df1258abbf4d8f817635f99e10d4ab, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/fedfcd10b5c0477383ba68355a96b65b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/3b8457b0bdc249adbbb0ef7fc36c376b] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp, totalSize=35.2 K 2024-11-20T19:25:11,676 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 22df1258abbf4d8f817635f99e10d4ab, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732130708607 2024-11-20T19:25:11,678 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting fedfcd10b5c0477383ba68355a96b65b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732130708761 2024-11-20T19:25:11,680 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b8457b0bdc249adbbb0ef7fc36c376b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732130710955 2024-11-20T19:25:11,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is 
added to blk_1073741851_1027 (size=12001) 2024-11-20T19:25:11,698 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/fc0e6e713249499283df9b901ee7c881 2024-11-20T19:25:11,733 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fb4967d0e6203ca72c498496394ce45#C#compaction#13 average throughput is 0.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:11,742 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/a116e1c98d36455ea2a8c3d1313c5c5d is 50, key is test_row_0/C:col10/1732130710961/Put/seqid=0 2024-11-20T19:25:11,746 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/eba9057af49445bca438a75750ef786a is 50, key is test_row_0/C:col10/1732130711409/Put/seqid=0 2024-11-20T19:25:11,774 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,775 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T19:25:11,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:11,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:11,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:11,775 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:11,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:11,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:11,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741852_1028 (size=12104) 2024-11-20T19:25:11,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741853_1029 (size=12001) 2024-11-20T19:25:11,824 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/eba9057af49445bca438a75750ef786a 2024-11-20T19:25:11,825 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/a116e1c98d36455ea2a8c3d1313c5c5d as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/a116e1c98d36455ea2a8c3d1313c5c5d 2024-11-20T19:25:11,833 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130771826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,834 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,834 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130771827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130771827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42008 deadline: 1732130771829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42042 deadline: 1732130771831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,843 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6fb4967d0e6203ca72c498496394ce45/C of 6fb4967d0e6203ca72c498496394ce45 into a116e1c98d36455ea2a8c3d1313c5c5d(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:11,843 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:11,843 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., storeName=6fb4967d0e6203ca72c498496394ce45/C, priority=13, startTime=1732130711339; duration=0sec 2024-11-20T19:25:11,843 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:11,843 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fb4967d0e6203ca72c498496394ce45:C 2024-11-20T19:25:11,849 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/c28a6f8404f54976abfff8e804970842 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/c28a6f8404f54976abfff8e804970842 2024-11-20T19:25:11,866 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/c28a6f8404f54976abfff8e804970842, entries=200, sequenceid=75, filesize=14.0 K 2024-11-20T19:25:11,878 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/fc0e6e713249499283df9b901ee7c881 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/fc0e6e713249499283df9b901ee7c881 2024-11-20T19:25:11,901 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/fc0e6e713249499283df9b901ee7c881, entries=150, sequenceid=75, filesize=11.7 K 2024-11-20T19:25:11,919 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/eba9057af49445bca438a75750ef786a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/eba9057af49445bca438a75750ef786a 2024-11-20T19:25:11,929 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:11,931 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T19:25:11,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:11,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:11,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:11,931 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:11,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:11,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
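Editor's note (not part of the captured log): the failed pid=17 above is a FlushRegionProcedure spawned by the table-level FlushTableProcedure pid=16; the master records the "Unable to complete flush" IOException and re-dispatches the callable shortly afterwards (the 19:25:12,086 entries below), because the region was still busy with an earlier flush. As a hedged sketch of what drives this from the client side (class name and wiring are assumptions, not code from this test), a whole-table flush can be requested through the Admin API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The master turns this request into a FlushTableProcedure with one
      // FlushRegionProcedure per region (pid=16 / pid=17 in the log). If a region is
      // already flushing, the region server's FlushRegionCallable fails with
      // "Unable to complete flush" and the procedure is retried until the
      // in-flight flush finishes.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}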
2024-11-20T19:25:11,935 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/eba9057af49445bca438a75750ef786a, entries=150, sequenceid=75, filesize=11.7 K 2024-11-20T19:25:11,937 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 6fb4967d0e6203ca72c498496394ce45 in 524ms, sequenceid=75, compaction requested=false 2024-11-20T19:25:11,937 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:11,961 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/7beb14e34462404a871e3436c9133f11 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/7beb14e34462404a871e3436c9133f11 2024-11-20T19:25:11,980 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6fb4967d0e6203ca72c498496394ce45/A of 6fb4967d0e6203ca72c498496394ce45 into 7beb14e34462404a871e3436c9133f11(size=11.8 K), total size for store is 25.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:11,980 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:11,980 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., storeName=6fb4967d0e6203ca72c498496394ce45/A, priority=13, startTime=1732130711310; duration=0sec 2024-11-20T19:25:11,980 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:11,980 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fb4967d0e6203ca72c498496394ce45:A 2024-11-20T19:25:12,086 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:12,086 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T19:25:12,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:12,087 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 6fb4967d0e6203ca72c498496394ce45 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:25:12,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=A 2024-11-20T19:25:12,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:12,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=B 2024-11-20T19:25:12,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:12,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=C 2024-11-20T19:25:12,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:12,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T19:25:12,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/088d47e8de1c49e185e4562227adcf75 is 50, key is test_row_0/A:col10/1732130711480/Put/seqid=0 2024-11-20T19:25:12,147 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:12,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:12,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741854_1030 (size=12001) 2024-11-20T19:25:12,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130772253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:12,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130772255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:12,278 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42008 deadline: 1732130772262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:12,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130772257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:12,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42042 deadline: 1732130772263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:12,369 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130772368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:12,370 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130772368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:12,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42042 deadline: 1732130772383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:12,390 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130772383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:12,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42008 deadline: 1732130772384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:12,559 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/088d47e8de1c49e185e4562227adcf75 2024-11-20T19:25:12,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130772575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:12,584 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130772575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:12,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/9c240517dac44740a074f90bb0d17708 is 50, key is test_row_0/B:col10/1732130711480/Put/seqid=0 2024-11-20T19:25:12,599 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130772594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:12,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42042 deadline: 1732130772622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:12,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42008 deadline: 1732130772622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:12,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741855_1031 (size=12001) 2024-11-20T19:25:12,659 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/9c240517dac44740a074f90bb0d17708 2024-11-20T19:25:12,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/8314e20ce6dc43faac73fd822d7196d9 is 50, key is test_row_0/C:col10/1732130711480/Put/seqid=0 2024-11-20T19:25:12,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741856_1032 (size=12001) 2024-11-20T19:25:12,740 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/8314e20ce6dc43faac73fd822d7196d9 2024-11-20T19:25:12,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/088d47e8de1c49e185e4562227adcf75 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/088d47e8de1c49e185e4562227adcf75 2024-11-20T19:25:12,771 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/088d47e8de1c49e185e4562227adcf75, entries=150, sequenceid=89, filesize=11.7 K 2024-11-20T19:25:12,773 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/9c240517dac44740a074f90bb0d17708 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/9c240517dac44740a074f90bb0d17708 2024-11-20T19:25:12,791 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/9c240517dac44740a074f90bb0d17708, entries=150, sequenceid=89, filesize=11.7 K 2024-11-20T19:25:12,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/8314e20ce6dc43faac73fd822d7196d9 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/8314e20ce6dc43faac73fd822d7196d9 2024-11-20T19:25:12,821 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/8314e20ce6dc43faac73fd822d7196d9, entries=150, sequenceid=89, filesize=11.7 K 2024-11-20T19:25:12,827 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 6fb4967d0e6203ca72c498496394ce45 in 739ms, sequenceid=89, compaction requested=true 2024-11-20T19:25:12,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:12,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
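Editor's note (not part of the captured log): while the pid=17 flush was draining the memstore, the Mutate calls in the surrounding entries were rejected with RegionTooBusyException and bounced back to the caller. As a hedged illustration only (the class name, retry count, and backoff are assumptions, and the stock HBase client already performs its own internal retries), a writer against the same table could catch the IOException and back off until the flush catches up; the row, family, and qualifier names reuse the ones visible in this log:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          // Server side this travels through RSRpcServices.mutate -> HRegion.put,
          // the same path shown in the stack traces above.
          table.put(put);
          break;
        } catch (IOException e) {
          // RegionTooBusyException (an IOException) is what checkResources throws while
          // the memstore is over its blocking limit; back off and let the flush drain it.
          Thread.sleep(200L * attempt);
        }
      }
    }
  }
}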
2024-11-20T19:25:12,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-20T19:25:12,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-11-20T19:25:12,837 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-11-20T19:25:12,837 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8400 sec 2024-11-20T19:25:12,841 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 1.8580 sec 2024-11-20T19:25:12,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:12,906 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6fb4967d0e6203ca72c498496394ce45 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-20T19:25:12,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=A 2024-11-20T19:25:12,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:12,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=B 2024-11-20T19:25:12,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:12,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=C 2024-11-20T19:25:12,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:12,930 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/5ee6f65607574231bebafbe6ce1910ef is 50, key is test_row_0/A:col10/1732130712900/Put/seqid=0 2024-11-20T19:25:12,954 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42042 deadline: 1732130772941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:12,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42008 deadline: 1732130772942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:12,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130772952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:12,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130772955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:12,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130772962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:12,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741857_1033 (size=12001) 2024-11-20T19:25:12,986 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/5ee6f65607574231bebafbe6ce1910ef 2024-11-20T19:25:13,026 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/57f25b51db134126b8cf2e447672dbad is 50, key is test_row_0/B:col10/1732130712900/Put/seqid=0 2024-11-20T19:25:13,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741858_1034 (size=12001) 2024-11-20T19:25:13,069 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:13,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130773060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:13,077 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:13,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130773064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:13,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:13,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130773074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:13,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T19:25:13,101 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-11-20T19:25:13,104 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:13,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-11-20T19:25:13,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T19:25:13,108 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:13,110 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:13,110 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:13,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T19:25:13,269 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:13,269 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 
{}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T19:25:13,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:13,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:13,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:13,270 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:13,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:13,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:13,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:13,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130773280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:13,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:13,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130773281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:13,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:13,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130773285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:13,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T19:25:13,424 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:13,425 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T19:25:13,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:13,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:13,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:13,426 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:13,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:13,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:13,460 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:13,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42008 deadline: 1732130773458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:13,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:13,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42042 deadline: 1732130773458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:13,474 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/57f25b51db134126b8cf2e447672dbad 2024-11-20T19:25:13,505 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/c1886967b8f04b64821f3c0edd8cc2e0 is 50, key is test_row_0/C:col10/1732130712900/Put/seqid=0 2024-11-20T19:25:13,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741859_1035 (size=12001) 2024-11-20T19:25:13,571 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/c1886967b8f04b64821f3c0edd8cc2e0 2024-11-20T19:25:13,581 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:13,582 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T19:25:13,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:13,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:13,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:13,582 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:13,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:13,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:13,587 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/5ee6f65607574231bebafbe6ce1910ef as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/5ee6f65607574231bebafbe6ce1910ef 2024-11-20T19:25:13,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:13,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130773592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:13,603 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/5ee6f65607574231bebafbe6ce1910ef, entries=150, sequenceid=117, filesize=11.7 K 2024-11-20T19:25:13,605 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:13,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130773592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:13,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/57f25b51db134126b8cf2e447672dbad as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/57f25b51db134126b8cf2e447672dbad 2024-11-20T19:25:13,608 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:13,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130773599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:13,625 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/57f25b51db134126b8cf2e447672dbad, entries=150, sequenceid=117, filesize=11.7 K 2024-11-20T19:25:13,627 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/c1886967b8f04b64821f3c0edd8cc2e0 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/c1886967b8f04b64821f3c0edd8cc2e0 2024-11-20T19:25:13,640 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/c1886967b8f04b64821f3c0edd8cc2e0, entries=150, sequenceid=117, filesize=11.7 K 2024-11-20T19:25:13,642 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 6fb4967d0e6203ca72c498496394ce45 in 736ms, sequenceid=117, compaction requested=true 2024-11-20T19:25:13,642 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:13,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fb4967d0e6203ca72c498496394ce45:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:13,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:13,642 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:13,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fb4967d0e6203ca72c498496394ce45:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:13,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), 
splitQueue=0 2024-11-20T19:25:13,643 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:13,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fb4967d0e6203ca72c498496394ce45:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:13,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:13,645 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50447 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:13,645 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 6fb4967d0e6203ca72c498496394ce45/A is initiating minor compaction (all files) 2024-11-20T19:25:13,645 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6fb4967d0e6203ca72c498496394ce45/A in TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:13,646 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/7beb14e34462404a871e3436c9133f11, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/c28a6f8404f54976abfff8e804970842, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/088d47e8de1c49e185e4562227adcf75, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/5ee6f65607574231bebafbe6ce1910ef] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp, totalSize=49.3 K 2024-11-20T19:25:13,646 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:13,646 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 6fb4967d0e6203ca72c498496394ce45/B is initiating minor compaction (all files) 2024-11-20T19:25:13,646 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6fb4967d0e6203ca72c498496394ce45/B in TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:13,647 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/f1df051742d44e8db8ad75ddb12f151e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/fc0e6e713249499283df9b901ee7c881, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/9c240517dac44740a074f90bb0d17708, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/57f25b51db134126b8cf2e447672dbad] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp, totalSize=47.0 K 2024-11-20T19:25:13,647 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7beb14e34462404a871e3436c9133f11, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732130710955 2024-11-20T19:25:13,647 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting f1df051742d44e8db8ad75ddb12f151e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732130710955 2024-11-20T19:25:13,648 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting c28a6f8404f54976abfff8e804970842, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732130711016 2024-11-20T19:25:13,648 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting fc0e6e713249499283df9b901ee7c881, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732130711051 2024-11-20T19:25:13,648 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 088d47e8de1c49e185e4562227adcf75, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732130711455 2024-11-20T19:25:13,649 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c240517dac44740a074f90bb0d17708, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732130711455 2024-11-20T19:25:13,649 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5ee6f65607574231bebafbe6ce1910ef, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732130712258 2024-11-20T19:25:13,650 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 57f25b51db134126b8cf2e447672dbad, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732130712258 2024-11-20T19:25:13,683 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fb4967d0e6203ca72c498496394ce45#B#compaction#21 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:13,684 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/c49a1e93ecea4fcbbad6cf23436a8350 is 50, key is test_row_0/B:col10/1732130712900/Put/seqid=0 2024-11-20T19:25:13,700 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fb4967d0e6203ca72c498496394ce45#A#compaction#22 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:13,701 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/a354941f689f47858645ec80cd42014c is 50, key is test_row_0/A:col10/1732130712900/Put/seqid=0 2024-11-20T19:25:13,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T19:25:13,737 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:13,739 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T19:25:13,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:13,739 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 6fb4967d0e6203ca72c498496394ce45 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-20T19:25:13,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=A 2024-11-20T19:25:13,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:13,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=B 2024-11-20T19:25:13,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:13,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=C 2024-11-20T19:25:13,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:13,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741860_1036 (size=12241) 2024-11-20T19:25:13,795 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/c49a1e93ecea4fcbbad6cf23436a8350 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/c49a1e93ecea4fcbbad6cf23436a8350 2024-11-20T19:25:13,813 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6fb4967d0e6203ca72c498496394ce45/B of 6fb4967d0e6203ca72c498496394ce45 into c49a1e93ecea4fcbbad6cf23436a8350(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:13,813 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:13,813 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., storeName=6fb4967d0e6203ca72c498496394ce45/B, priority=12, startTime=1732130713642; duration=0sec 2024-11-20T19:25:13,813 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:13,813 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fb4967d0e6203ca72c498496394ce45:B 2024-11-20T19:25:13,814 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:13,822 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:13,822 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 6fb4967d0e6203ca72c498496394ce45/C is initiating minor compaction (all files) 2024-11-20T19:25:13,822 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6fb4967d0e6203ca72c498496394ce45/C in TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:13,823 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/a116e1c98d36455ea2a8c3d1313c5c5d, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/eba9057af49445bca438a75750ef786a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/8314e20ce6dc43faac73fd822d7196d9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/c1886967b8f04b64821f3c0edd8cc2e0] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp, totalSize=47.0 K 2024-11-20T19:25:13,824 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting a116e1c98d36455ea2a8c3d1313c5c5d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732130710955 2024-11-20T19:25:13,825 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting eba9057af49445bca438a75750ef786a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732130711051 2024-11-20T19:25:13,826 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 8314e20ce6dc43faac73fd822d7196d9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=89, earliestPutTs=1732130711455 2024-11-20T19:25:13,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741861_1037 (size=12241) 2024-11-20T19:25:13,828 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting c1886967b8f04b64821f3c0edd8cc2e0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732130712258 2024-11-20T19:25:13,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/357302b6d3ae457fb28d7f66ce904d2e is 50, key is test_row_0/A:col10/1732130712951/Put/seqid=0 2024-11-20T19:25:13,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741862_1038 (size=12001) 2024-11-20T19:25:13,886 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fb4967d0e6203ca72c498496394ce45#C#compaction#24 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:13,888 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/7e9cb734ebbe4f79bd8659bd686569de is 50, key is test_row_0/C:col10/1732130712900/Put/seqid=0 2024-11-20T19:25:13,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741863_1039 (size=12241) 2024-11-20T19:25:13,960 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/7e9cb734ebbe4f79bd8659bd686569de as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/7e9cb734ebbe4f79bd8659bd686569de 2024-11-20T19:25:13,976 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6fb4967d0e6203ca72c498496394ce45/C of 6fb4967d0e6203ca72c498496394ce45 into 7e9cb734ebbe4f79bd8659bd686569de(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:13,976 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:13,976 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., storeName=6fb4967d0e6203ca72c498496394ce45/C, priority=12, startTime=1732130713643; duration=0sec 2024-11-20T19:25:13,977 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:13,977 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fb4967d0e6203ca72c498496394ce45:C 2024-11-20T19:25:14,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:14,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:14,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T19:25:14,261 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/a354941f689f47858645ec80cd42014c as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/a354941f689f47858645ec80cd42014c 2024-11-20T19:25:14,273 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/357302b6d3ae457fb28d7f66ce904d2e 2024-11-20T19:25:14,287 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6fb4967d0e6203ca72c498496394ce45/A of 6fb4967d0e6203ca72c498496394ce45 into a354941f689f47858645ec80cd42014c(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:14,287 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:14,287 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., storeName=6fb4967d0e6203ca72c498496394ce45/A, priority=12, startTime=1732130713642; duration=0sec 2024-11-20T19:25:14,288 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:14,289 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fb4967d0e6203ca72c498496394ce45:A 2024-11-20T19:25:14,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/76f2496e541f4ee6a441cabc1aefe265 is 50, key is test_row_0/B:col10/1732130712951/Put/seqid=0 2024-11-20T19:25:14,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130774312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:14,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130774321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:14,323 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130774321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:14,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741864_1040 (size=12001) 2024-11-20T19:25:14,356 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/76f2496e541f4ee6a441cabc1aefe265 2024-11-20T19:25:14,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/bf26af61a4ee4fb4809938fc033e44fd is 50, key is test_row_0/C:col10/1732130712951/Put/seqid=0 2024-11-20T19:25:14,427 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130774424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:14,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130774427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:14,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130774425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:14,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741865_1041 (size=12001) 2024-11-20T19:25:14,471 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42042 deadline: 1732130774469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:14,474 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42008 deadline: 1732130774472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:14,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130774630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:14,640 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130774640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:14,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130774640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:14,850 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/bf26af61a4ee4fb4809938fc033e44fd 2024-11-20T19:25:14,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/357302b6d3ae457fb28d7f66ce904d2e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/357302b6d3ae457fb28d7f66ce904d2e 2024-11-20T19:25:14,881 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/357302b6d3ae457fb28d7f66ce904d2e, entries=150, sequenceid=126, filesize=11.7 K 2024-11-20T19:25:14,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/76f2496e541f4ee6a441cabc1aefe265 as 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/76f2496e541f4ee6a441cabc1aefe265 2024-11-20T19:25:14,903 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/76f2496e541f4ee6a441cabc1aefe265, entries=150, sequenceid=126, filesize=11.7 K 2024-11-20T19:25:14,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/bf26af61a4ee4fb4809938fc033e44fd as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/bf26af61a4ee4fb4809938fc033e44fd 2024-11-20T19:25:14,916 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/bf26af61a4ee4fb4809938fc033e44fd, entries=150, sequenceid=126, filesize=11.7 K 2024-11-20T19:25:14,917 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 6fb4967d0e6203ca72c498496394ce45 in 1178ms, sequenceid=126, compaction requested=false 2024-11-20T19:25:14,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:14,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:14,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-20T19:25:14,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-11-20T19:25:14,928 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-11-20T19:25:14,928 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8100 sec 2024-11-20T19:25:14,932 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 1.8210 sec 2024-11-20T19:25:14,967 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6fb4967d0e6203ca72c498496394ce45 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-20T19:25:14,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=A 2024-11-20T19:25:14,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:14,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=B 2024-11-20T19:25:14,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:14,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=C 2024-11-20T19:25:14,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:14,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:14,986 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/cbdf3c2ff1294c36917b88c53c8d621f is 50, key is test_row_0/A:col10/1732130714960/Put/seqid=0 2024-11-20T19:25:14,989 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130774980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:14,990 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130774988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:15,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:15,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130774997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:15,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741866_1042 (size=12151) 2024-11-20T19:25:15,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:15,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130775091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:15,097 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:15,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130775093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:15,111 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:15,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130775105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:15,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T19:25:15,219 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-11-20T19:25:15,224 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:15,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-11-20T19:25:15,227 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:15,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T19:25:15,229 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:15,229 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:15,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:15,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130775300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:15,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:15,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130775304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:15,323 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:15,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130775314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:15,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T19:25:15,393 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:15,394 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T19:25:15,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:15,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:15,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:15,395 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:15,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:15,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:15,452 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/cbdf3c2ff1294c36917b88c53c8d621f 2024-11-20T19:25:15,479 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/5287e990a5434af58ede6489e9ec6408 is 50, key is test_row_0/B:col10/1732130714960/Put/seqid=0 2024-11-20T19:25:15,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T19:25:15,548 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:15,556 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T19:25:15,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:15,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741867_1043 (size=12151) 2024-11-20T19:25:15,558 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/5287e990a5434af58ede6489e9ec6408 2024-11-20T19:25:15,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:15,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:15,559 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:15,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:15,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:15,589 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/2c7f5bdc0c3a4659afa3a79386a172db is 50, key is test_row_0/C:col10/1732130714960/Put/seqid=0 2024-11-20T19:25:15,618 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:15,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130775615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:15,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741868_1044 (size=12151) 2024-11-20T19:25:15,628 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/2c7f5bdc0c3a4659afa3a79386a172db 2024-11-20T19:25:15,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:15,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130775628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:15,634 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:15,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130775622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:15,642 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/cbdf3c2ff1294c36917b88c53c8d621f as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/cbdf3c2ff1294c36917b88c53c8d621f 2024-11-20T19:25:15,658 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/cbdf3c2ff1294c36917b88c53c8d621f, entries=150, sequenceid=158, filesize=11.9 K 2024-11-20T19:25:15,662 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/5287e990a5434af58ede6489e9ec6408 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/5287e990a5434af58ede6489e9ec6408 2024-11-20T19:25:15,683 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/5287e990a5434af58ede6489e9ec6408, entries=150, sequenceid=158, filesize=11.9 K 2024-11-20T19:25:15,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/2c7f5bdc0c3a4659afa3a79386a172db as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/2c7f5bdc0c3a4659afa3a79386a172db 2024-11-20T19:25:15,698 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/2c7f5bdc0c3a4659afa3a79386a172db, entries=150, sequenceid=158, filesize=11.9 K 2024-11-20T19:25:15,700 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=33.54 
KB/34350 for 6fb4967d0e6203ca72c498496394ce45 in 734ms, sequenceid=158, compaction requested=true 2024-11-20T19:25:15,701 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:15,701 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:15,703 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:15,704 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 6fb4967d0e6203ca72c498496394ce45/A is initiating minor compaction (all files) 2024-11-20T19:25:15,704 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6fb4967d0e6203ca72c498496394ce45/A in TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:15,704 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/a354941f689f47858645ec80cd42014c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/357302b6d3ae457fb28d7f66ce904d2e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/cbdf3c2ff1294c36917b88c53c8d621f] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp, totalSize=35.5 K 2024-11-20T19:25:15,705 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting a354941f689f47858645ec80cd42014c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732130712258 2024-11-20T19:25:15,706 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 357302b6d3ae457fb28d7f66ce904d2e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1732130712924 2024-11-20T19:25:15,707 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting cbdf3c2ff1294c36917b88c53c8d621f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732130714297 2024-11-20T19:25:15,713 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:15,714 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T19:25:15,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:15,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fb4967d0e6203ca72c498496394ce45:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:15,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:15,715 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 6fb4967d0e6203ca72c498496394ce45 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T19:25:15,715 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:15,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=A 2024-11-20T19:25:15,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:15,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=B 2024-11-20T19:25:15,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:15,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=C 2024-11-20T19:25:15,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:15,717 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:15,717 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 6fb4967d0e6203ca72c498496394ce45/B is initiating minor compaction (all files) 2024-11-20T19:25:15,717 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6fb4967d0e6203ca72c498496394ce45/B in TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:15,717 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/c49a1e93ecea4fcbbad6cf23436a8350, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/76f2496e541f4ee6a441cabc1aefe265, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/5287e990a5434af58ede6489e9ec6408] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp, totalSize=35.5 K 2024-11-20T19:25:15,718 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting c49a1e93ecea4fcbbad6cf23436a8350, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732130712258 2024-11-20T19:25:15,720 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 76f2496e541f4ee6a441cabc1aefe265, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1732130712924 2024-11-20T19:25:15,721 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 5287e990a5434af58ede6489e9ec6408, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732130714297 2024-11-20T19:25:15,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fb4967d0e6203ca72c498496394ce45:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:15,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:15,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fb4967d0e6203ca72c498496394ce45:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:15,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:15,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/140683aba39643b0b83bee1845e3d708 is 50, key is test_row_0/A:col10/1732130714987/Put/seqid=0 2024-11-20T19:25:15,746 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fb4967d0e6203ca72c498496394ce45#A#compaction#31 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:15,748 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/636d985cbb8146d79c6626aee8dd6836 is 50, key is test_row_0/A:col10/1732130714960/Put/seqid=0 2024-11-20T19:25:15,761 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fb4967d0e6203ca72c498496394ce45#B#compaction#32 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:15,762 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/50202581cc0841a7a7fd1c0711265a2b is 50, key is test_row_0/B:col10/1732130714960/Put/seqid=0 2024-11-20T19:25:15,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T19:25:15,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741869_1045 (size=12151) 2024-11-20T19:25:15,834 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/140683aba39643b0b83bee1845e3d708 2024-11-20T19:25:15,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741870_1046 (size=12493) 2024-11-20T19:25:15,871 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/636d985cbb8146d79c6626aee8dd6836 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/636d985cbb8146d79c6626aee8dd6836 2024-11-20T19:25:15,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741871_1047 (size=12493) 2024-11-20T19:25:15,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/30834964aaa348debd4b6edd268e83ee is 50, key is test_row_0/B:col10/1732130714987/Put/seqid=0 2024-11-20T19:25:15,895 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/50202581cc0841a7a7fd1c0711265a2b as 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/50202581cc0841a7a7fd1c0711265a2b 2024-11-20T19:25:15,904 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6fb4967d0e6203ca72c498496394ce45/A of 6fb4967d0e6203ca72c498496394ce45 into 636d985cbb8146d79c6626aee8dd6836(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:15,904 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:15,905 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., storeName=6fb4967d0e6203ca72c498496394ce45/A, priority=13, startTime=1732130715701; duration=0sec 2024-11-20T19:25:15,905 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:15,906 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fb4967d0e6203ca72c498496394ce45:A 2024-11-20T19:25:15,906 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:15,910 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:15,911 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 6fb4967d0e6203ca72c498496394ce45/C is initiating minor compaction (all files) 2024-11-20T19:25:15,911 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6fb4967d0e6203ca72c498496394ce45/C in TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:15,911 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/7e9cb734ebbe4f79bd8659bd686569de, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/bf26af61a4ee4fb4809938fc033e44fd, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/2c7f5bdc0c3a4659afa3a79386a172db] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp, totalSize=35.5 K 2024-11-20T19:25:15,912 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e9cb734ebbe4f79bd8659bd686569de, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732130712258 2024-11-20T19:25:15,914 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting bf26af61a4ee4fb4809938fc033e44fd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1732130712924 2024-11-20T19:25:15,915 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2c7f5bdc0c3a4659afa3a79386a172db, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732130714297 2024-11-20T19:25:15,920 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6fb4967d0e6203ca72c498496394ce45/B of 6fb4967d0e6203ca72c498496394ce45 into 50202581cc0841a7a7fd1c0711265a2b(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:15,921 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:15,921 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., storeName=6fb4967d0e6203ca72c498496394ce45/B, priority=13, startTime=1732130715715; duration=0sec 2024-11-20T19:25:15,921 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:15,921 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fb4967d0e6203ca72c498496394ce45:B 2024-11-20T19:25:15,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741872_1048 (size=12151) 2024-11-20T19:25:15,969 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/30834964aaa348debd4b6edd268e83ee 2024-11-20T19:25:15,989 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fb4967d0e6203ca72c498496394ce45#C#compaction#34 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:15,990 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/3e279a179bd64d719912d3533776464f is 50, key is test_row_0/C:col10/1732130714960/Put/seqid=0 2024-11-20T19:25:16,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/480efe4866f54b3ba0c4be8409e6c5a1 is 50, key is test_row_0/C:col10/1732130714987/Put/seqid=0 2024-11-20T19:25:16,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741873_1049 (size=12493) 2024-11-20T19:25:16,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741874_1050 (size=12151) 2024-11-20T19:25:16,088 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/3e279a179bd64d719912d3533776464f as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/3e279a179bd64d719912d3533776464f 2024-11-20T19:25:16,107 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] 
regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6fb4967d0e6203ca72c498496394ce45/C of 6fb4967d0e6203ca72c498496394ce45 into 3e279a179bd64d719912d3533776464f(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:16,107 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:16,107 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., storeName=6fb4967d0e6203ca72c498496394ce45/C, priority=13, startTime=1732130715723; duration=0sec 2024-11-20T19:25:16,107 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:16,107 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fb4967d0e6203ca72c498496394ce45:C 2024-11-20T19:25:16,142 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:16,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:16,238 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130776234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:16,240 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130776237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:16,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130776234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:16,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T19:25:16,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130776340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:16,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130776343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:16,356 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130776354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:16,487 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/480efe4866f54b3ba0c4be8409e6c5a1 2024-11-20T19:25:16,499 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42042 deadline: 1732130776488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:16,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/140683aba39643b0b83bee1845e3d708 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/140683aba39643b0b83bee1845e3d708 2024-11-20T19:25:16,501 DEBUG [Thread-151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4238 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., hostname=db9c3a6c6492,41229,1732130701496, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at 
org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:25:16,498 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42008 deadline: 1732130776488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:16,503 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4241 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., hostname=db9c3a6c6492,41229,1732130701496, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:25:16,515 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/140683aba39643b0b83bee1845e3d708, entries=150, sequenceid=166, filesize=11.9 K 2024-11-20T19:25:16,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/30834964aaa348debd4b6edd268e83ee as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/30834964aaa348debd4b6edd268e83ee 2024-11-20T19:25:16,528 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/30834964aaa348debd4b6edd268e83ee, entries=150, sequenceid=166, filesize=11.9 K 2024-11-20T19:25:16,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/480efe4866f54b3ba0c4be8409e6c5a1 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/480efe4866f54b3ba0c4be8409e6c5a1 2024-11-20T19:25:16,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130776548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:16,553 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/480efe4866f54b3ba0c4be8409e6c5a1, entries=150, sequenceid=166, filesize=11.9 K 2024-11-20T19:25:16,558 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for 6fb4967d0e6203ca72c498496394ce45 in 844ms, sequenceid=166, compaction requested=false 2024-11-20T19:25:16,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:16,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:16,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-20T19:25:16,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-11-20T19:25:16,563 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-11-20T19:25:16,563 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3320 sec 2024-11-20T19:25:16,566 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 1.3400 sec 2024-11-20T19:25:16,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:16,586 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6fb4967d0e6203ca72c498496394ce45 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-20T19:25:16,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=A 2024-11-20T19:25:16,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:16,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=B 2024-11-20T19:25:16,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:16,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=C 2024-11-20T19:25:16,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:16,614 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/fac3d9ae98b447f8a7141693a23d2801 is 50, key is test_row_0/A:col10/1732130716231/Put/seqid=0 2024-11-20T19:25:16,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130776616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:16,621 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130776608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:16,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741875_1051 (size=12151) 2024-11-20T19:25:16,640 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/fac3d9ae98b447f8a7141693a23d2801 2024-11-20T19:25:16,697 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/8b20dd05ad9840acb881eba5a78adebc is 50, key is test_row_0/B:col10/1732130716231/Put/seqid=0 2024-11-20T19:25:16,732 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130776725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:16,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130776728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:16,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741876_1052 (size=12151) 2024-11-20T19:25:16,857 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130776854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:16,940 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130776937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:16,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130776940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:17,146 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/8b20dd05ad9840acb881eba5a78adebc 2024-11-20T19:25:17,173 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/8f680650f4aa438b830ea3bec293d2be is 50, key is test_row_0/C:col10/1732130716231/Put/seqid=0 2024-11-20T19:25:17,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741877_1053 (size=12151) 2024-11-20T19:25:17,219 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/8f680650f4aa438b830ea3bec293d2be 2024-11-20T19:25:17,233 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/fac3d9ae98b447f8a7141693a23d2801 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/fac3d9ae98b447f8a7141693a23d2801 2024-11-20T19:25:17,244 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/fac3d9ae98b447f8a7141693a23d2801, entries=150, sequenceid=198, filesize=11.9 K 2024-11-20T19:25:17,247 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/8b20dd05ad9840acb881eba5a78adebc as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/8b20dd05ad9840acb881eba5a78adebc 2024-11-20T19:25:17,258 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/8b20dd05ad9840acb881eba5a78adebc, entries=150, sequenceid=198, filesize=11.9 K 2024-11-20T19:25:17,260 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:17,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130777249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:17,261 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/8f680650f4aa438b830ea3bec293d2be as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/8f680650f4aa438b830ea3bec293d2be 2024-11-20T19:25:17,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:17,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130777249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:17,273 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/8f680650f4aa438b830ea3bec293d2be, entries=150, sequenceid=198, filesize=11.9 K 2024-11-20T19:25:17,274 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=33.54 KB/34350 for 6fb4967d0e6203ca72c498496394ce45 in 688ms, sequenceid=198, compaction requested=true 2024-11-20T19:25:17,274 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:17,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fb4967d0e6203ca72c498496394ce45:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:17,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:17,275 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:17,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fb4967d0e6203ca72c498496394ce45:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:17,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:17,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fb4967d0e6203ca72c498496394ce45:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:17,275 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:17,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:17,277 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:17,277 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:17,277 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 6fb4967d0e6203ca72c498496394ce45/B is initiating minor compaction (all files) 2024-11-20T19:25:17,277 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 6fb4967d0e6203ca72c498496394ce45/A is initiating minor compaction (all files) 2024-11-20T19:25:17,277 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6fb4967d0e6203ca72c498496394ce45/B in TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:17,277 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/50202581cc0841a7a7fd1c0711265a2b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/30834964aaa348debd4b6edd268e83ee, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/8b20dd05ad9840acb881eba5a78adebc] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp, totalSize=35.9 K 2024-11-20T19:25:17,280 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6fb4967d0e6203ca72c498496394ce45/A in TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:17,280 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/636d985cbb8146d79c6626aee8dd6836, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/140683aba39643b0b83bee1845e3d708, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/fac3d9ae98b447f8a7141693a23d2801] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp, totalSize=35.9 K 2024-11-20T19:25:17,281 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 50202581cc0841a7a7fd1c0711265a2b, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732130714297 2024-11-20T19:25:17,281 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 30834964aaa348debd4b6edd268e83ee, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1732130714975 2024-11-20T19:25:17,282 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 636d985cbb8146d79c6626aee8dd6836, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732130714297 2024-11-20T19:25:17,282 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b20dd05ad9840acb881eba5a78adebc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732130716231 2024-11-20T19:25:17,283 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 140683aba39643b0b83bee1845e3d708, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1732130714975 2024-11-20T19:25:17,285 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting fac3d9ae98b447f8a7141693a23d2801, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732130716231 2024-11-20T19:25:17,308 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fb4967d0e6203ca72c498496394ce45#B#compaction#39 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:17,309 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/d5f72ab2071342bdba0979d31ac5be58 is 50, key is test_row_0/B:col10/1732130716231/Put/seqid=0 2024-11-20T19:25:17,320 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fb4967d0e6203ca72c498496394ce45#A#compaction#40 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:17,321 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/83c4b08275c34cbabf20c4a0619da142 is 50, key is test_row_0/A:col10/1732130716231/Put/seqid=0 2024-11-20T19:25:17,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T19:25:17,335 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-11-20T19:25:17,338 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:17,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-11-20T19:25:17,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T19:25:17,343 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:17,344 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:17,344 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:17,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741878_1054 (size=12595) 2024-11-20T19:25:17,378 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/d5f72ab2071342bdba0979d31ac5be58 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/d5f72ab2071342bdba0979d31ac5be58 2024-11-20T19:25:17,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:17,381 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6fb4967d0e6203ca72c498496394ce45 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:25:17,382 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=A 2024-11-20T19:25:17,382 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:17,382 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO 
DISK 6fb4967d0e6203ca72c498496394ce45, store=B 2024-11-20T19:25:17,382 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:17,382 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=C 2024-11-20T19:25:17,382 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:17,391 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6fb4967d0e6203ca72c498496394ce45/B of 6fb4967d0e6203ca72c498496394ce45 into d5f72ab2071342bdba0979d31ac5be58(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:17,391 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:17,391 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., storeName=6fb4967d0e6203ca72c498496394ce45/B, priority=13, startTime=1732130717275; duration=0sec 2024-11-20T19:25:17,392 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:17,392 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fb4967d0e6203ca72c498496394ce45:B 2024-11-20T19:25:17,392 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:17,396 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:17,396 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 6fb4967d0e6203ca72c498496394ce45/C is initiating minor compaction (all files) 2024-11-20T19:25:17,397 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6fb4967d0e6203ca72c498496394ce45/C in TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:17,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741879_1055 (size=12595) 2024-11-20T19:25:17,397 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/3e279a179bd64d719912d3533776464f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/480efe4866f54b3ba0c4be8409e6c5a1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/8f680650f4aa438b830ea3bec293d2be] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp, totalSize=35.9 K 2024-11-20T19:25:17,398 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e279a179bd64d719912d3533776464f, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732130714297 2024-11-20T19:25:17,398 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 480efe4866f54b3ba0c4be8409e6c5a1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1732130714975 2024-11-20T19:25:17,399 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8f680650f4aa438b830ea3bec293d2be, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732130716231 2024-11-20T19:25:17,400 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/ab422284e09349eb99e09ae8b54738c0 is 50, key is test_row_0/A:col10/1732130717372/Put/seqid=0 2024-11-20T19:25:17,412 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/83c4b08275c34cbabf20c4a0619da142 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/83c4b08275c34cbabf20c4a0619da142 2024-11-20T19:25:17,423 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6fb4967d0e6203ca72c498496394ce45/A of 6fb4967d0e6203ca72c498496394ce45 into 83c4b08275c34cbabf20c4a0619da142(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
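The short- and long-compaction threads above are rewriting the three flushed files of each column family into a single ~12.3 K file per store, throttled by the PressureAwareThroughputController at the logged 50.00 MB/second limit. The same rewrite can also be requested from a client through the Admin API; a minimal sketch, assuming a reachable cluster and using the table name from the log (the polling loop is illustrative, not part of the test):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestCompaction {
        public static void main(String[] args) throws Exception {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Reads hbase-site.xml from the classpath to locate the cluster.
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Asynchronous: asks the region servers to schedule a compaction of
                // every store of the table on their short/long compaction threads.
                admin.compact(table);
                // Poll until the servers report that no compaction is running anymore.
                while (admin.getCompactionState(table) != CompactionState.NONE) {
                    Thread.sleep(200);
                }
            }
        }
    }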
2024-11-20T19:25:17,424 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:17,424 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., storeName=6fb4967d0e6203ca72c498496394ce45/A, priority=13, startTime=1732130717275; duration=0sec 2024-11-20T19:25:17,424 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:17,424 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fb4967d0e6203ca72c498496394ce45:A 2024-11-20T19:25:17,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T19:25:17,445 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fb4967d0e6203ca72c498496394ce45#C#compaction#42 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:17,446 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/adbd2ca3af0d4a77b4e025af6456a7be is 50, key is test_row_0/C:col10/1732130716231/Put/seqid=0 2024-11-20T19:25:17,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741880_1056 (size=14541) 2024-11-20T19:25:17,496 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:17,497 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T19:25:17,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:17,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:17,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:17,498 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:17,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:17,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:17,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741881_1057 (size=12595) 2024-11-20T19:25:17,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T19:25:17,656 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:17,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:17,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130777649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:17,660 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T19:25:17,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:17,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:17,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:17,660 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:17,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:17,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:17,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:17,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130777760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:17,766 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:17,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130777764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:17,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:17,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130777769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:17,813 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:17,814 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T19:25:17,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:17,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:17,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:17,814 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:17,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:17,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
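The Mutate calls rejected above (callId 125, 104 and 100) hit HRegion.checkResources while the region's memstore was over its blocking limit of 512.0 K, evidently a test-sized threshold; each writer receives a RegionTooBusyException and has to come back once a flush drains the memstore. A back-off loop of the kind a writer can wrap around its puts is sketched below; row, family and qualifier are taken from the log, the value and retry timings are assumptions, and it further assumes client-side retries (hbase.client.retries.number) are set low enough that the exception actually surfaces to the caller rather than being retried internally:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPut {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                for (int attempt = 1; attempt <= 10; attempt++) {
                    try {
                        table.put(put);
                        break; // write accepted
                    } catch (RegionTooBusyException e) {
                        // The region is over its memstore blocking limit; give the
                        // flush some time to drain it, then try again.
                        Thread.sleep(100L * attempt);
                    }
                }
            }
        }
    }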
2024-11-20T19:25:17,872 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/ab422284e09349eb99e09ae8b54738c0 2024-11-20T19:25:17,904 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/2da7de442fbe4ba1a1ff1c847da4a815 is 50, key is test_row_0/B:col10/1732130717372/Put/seqid=0 2024-11-20T19:25:17,943 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/adbd2ca3af0d4a77b4e025af6456a7be as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/adbd2ca3af0d4a77b4e025af6456a7be 2024-11-20T19:25:17,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T19:25:17,963 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6fb4967d0e6203ca72c498496394ce45/C of 6fb4967d0e6203ca72c498496394ce45 into adbd2ca3af0d4a77b4e025af6456a7be(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:17,963 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:17,964 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., storeName=6fb4967d0e6203ca72c498496394ce45/C, priority=13, startTime=1732130717275; duration=0sec 2024-11-20T19:25:17,964 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:17,964 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fb4967d0e6203ca72c498496394ce45:C 2024-11-20T19:25:17,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:17,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130777966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:17,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741882_1058 (size=12151) 2024-11-20T19:25:17,971 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:17,972 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T19:25:17,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:17,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:17,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:17,972 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:17,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:17,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:17,977 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/2da7de442fbe4ba1a1ff1c847da4a815 2024-11-20T19:25:18,010 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/9545309004e04918aa5c5d7be9fd4d47 is 50, key is test_row_0/C:col10/1732130717372/Put/seqid=0 2024-11-20T19:25:18,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741883_1059 (size=12151) 2024-11-20T19:25:18,062 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/9545309004e04918aa5c5d7be9fd4d47 2024-11-20T19:25:18,074 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/ab422284e09349eb99e09ae8b54738c0 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/ab422284e09349eb99e09ae8b54738c0 2024-11-20T19:25:18,087 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/ab422284e09349eb99e09ae8b54738c0, entries=200, sequenceid=209, filesize=14.2 K 2024-11-20T19:25:18,090 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/2da7de442fbe4ba1a1ff1c847da4a815 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/2da7de442fbe4ba1a1ff1c847da4a815 2024-11-20T19:25:18,123 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/2da7de442fbe4ba1a1ff1c847da4a815, entries=150, sequenceid=209, filesize=11.9 K 2024-11-20T19:25:18,125 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/9545309004e04918aa5c5d7be9fd4d47 as 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/9545309004e04918aa5c5d7be9fd4d47 2024-11-20T19:25:18,127 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:18,131 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T19:25:18,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:18,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:18,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:18,132 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:18,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:18,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
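The repeated pid=23 failures above are the region server declining the master's FlushRegionCallable because its own MemStoreFlusher is still writing the region out ("NOT flushing ... as already flushing"); the master keeps re-dispatching the callable, and just below it finally acquires the region and flushes ~154 KB across the three families. The whole exchange was started by a client flush request (procId 22, Client=jenkins//172.17.0.2), which corresponds to a plain Admin call; a minimal sketch, with connection setup assumed:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestFlush {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Submits a FlushTableProcedure on the master and waits for it to
                // complete; the repeated "Checking to see if procedure is done pid=22"
                // lines are the client polling for exactly this completion.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }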
2024-11-20T19:25:18,136 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/9545309004e04918aa5c5d7be9fd4d47, entries=150, sequenceid=209, filesize=11.9 K 2024-11-20T19:25:18,139 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 6fb4967d0e6203ca72c498496394ce45 in 758ms, sequenceid=209, compaction requested=false 2024-11-20T19:25:18,140 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:18,285 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:18,286 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T19:25:18,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:18,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:18,287 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:18,287 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 6fb4967d0e6203ca72c498496394ce45 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T19:25:18,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=A 2024-11-20T19:25:18,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:18,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=B 2024-11-20T19:25:18,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:18,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=C 2024-11-20T19:25:18,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:18,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/bef4bc06bfb84b6784dac5a582ff67b9 is 50, key is test_row_0/A:col10/1732130718280/Put/seqid=0 2024-11-20T19:25:18,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741884_1060 (size=14541) 2024-11-20T19:25:18,433 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:18,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130778404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:18,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T19:25:18,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:18,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130778536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:18,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:18,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130778747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:18,771 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/bef4bc06bfb84b6784dac5a582ff67b9 2024-11-20T19:25:18,776 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:18,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130778771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:18,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:18,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130778781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:18,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/52901557d2ba47c4874bb5585f0c12a2 is 50, key is test_row_0/B:col10/1732130718280/Put/seqid=0 2024-11-20T19:25:18,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741885_1061 (size=12151) 2024-11-20T19:25:19,065 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:19,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130779060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:19,274 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/52901557d2ba47c4874bb5585f0c12a2 2024-11-20T19:25:19,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/f4010a615d034a55b29bbbb6b981765b is 50, key is test_row_0/C:col10/1732130718280/Put/seqid=0 2024-11-20T19:25:19,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741886_1062 (size=12151) 2024-11-20T19:25:19,349 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/f4010a615d034a55b29bbbb6b981765b 2024-11-20T19:25:19,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/bef4bc06bfb84b6784dac5a582ff67b9 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/bef4bc06bfb84b6784dac5a582ff67b9 2024-11-20T19:25:19,380 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/bef4bc06bfb84b6784dac5a582ff67b9, entries=200, sequenceid=238, filesize=14.2 K 2024-11-20T19:25:19,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/52901557d2ba47c4874bb5585f0c12a2 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/52901557d2ba47c4874bb5585f0c12a2 2024-11-20T19:25:19,399 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/52901557d2ba47c4874bb5585f0c12a2, entries=150, sequenceid=238, filesize=11.9 K 2024-11-20T19:25:19,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/f4010a615d034a55b29bbbb6b981765b as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/f4010a615d034a55b29bbbb6b981765b 2024-11-20T19:25:19,424 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/f4010a615d034a55b29bbbb6b981765b, entries=150, sequenceid=238, filesize=11.9 K 2024-11-20T19:25:19,426 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 6fb4967d0e6203ca72c498496394ce45 in 1139ms, sequenceid=238, compaction requested=true 2024-11-20T19:25:19,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:19,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
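Note on the repeated RegionTooBusyException warnings above: HRegion.checkResources rejects writes while the region's memstore sits above its blocking limit, reported here as 512.0 K. In a stock setup that limit is hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier, so this test presumably lowers the flush size; that is an assumption, since the test configuration is not visible in this log. The sketch below shows one way a client could absorb this backpressure with a bounded retry; the table, row and family names mirror the log, but the retry policy itself is illustrative only, and in practice the stock client already retries this exception internally, so an explicit loop like this only matters when client retries are capped.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriter {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 0; attempt < 10; attempt++) {
            try {
              table.put(put);   // the client also retries internally
              return;
            } catch (IOException e) {
              // RegionTooBusyException (possibly wrapped by the client's own
              // retry machinery) signals memstore backpressure: back off,
              // give the in-flight flush time to complete, then try again.
              Thread.sleep(Math.min(100L << attempt, 5000L));
            }
          }
          throw new IOException("region still too busy after 10 attempts");
        }
      }
    }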
2024-11-20T19:25:19,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-11-20T19:25:19,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-11-20T19:25:19,434 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-11-20T19:25:19,434 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0860 sec 2024-11-20T19:25:19,439 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 2.0970 sec 2024-11-20T19:25:19,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T19:25:19,448 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-11-20T19:25:19,450 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:19,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-11-20T19:25:19,455 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:19,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T19:25:19,456 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:19,456 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:19,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T19:25:19,585 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6fb4967d0e6203ca72c498496394ce45 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:25:19,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=A 2024-11-20T19:25:19,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:19,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=B 2024-11-20T19:25:19,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
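Note on the FlushTableProcedure entries above (procId 22 completed, pid=24 stored with a FlushRegionProcedure child): these correspond to client-driven flushes of the whole table, issued here by the test client at 172.17.0.2. A minimal sketch of what such a request looks like through the Admin API, assuming a client configured against this cluster; the synchronous flush call blocks until the master-side procedure finishes.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Submits a table flush to the master and waits for it to finish;
          // in this log that shows up as a FlushTableProcedure with one
          // FlushRegionProcedure child per region of TestAcidGuarantees.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }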
2024-11-20T19:25:19,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=C 2024-11-20T19:25:19,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:19,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:19,597 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/72b9af5d7a9847c091d658824b71aec9 is 50, key is test_row_0/A:col10/1732130718382/Put/seqid=0 2024-11-20T19:25:19,608 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:19,611 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T19:25:19,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:19,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:19,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:19,613 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:19,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:19,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:19,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741887_1063 (size=14541) 2024-11-20T19:25:19,642 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/72b9af5d7a9847c091d658824b71aec9 2024-11-20T19:25:19,663 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/de6a7f1ec41f4575a9a584d3d4407c9a is 50, key is test_row_0/B:col10/1732130718382/Put/seqid=0 2024-11-20T19:25:19,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741888_1064 (size=12151) 2024-11-20T19:25:19,724 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/de6a7f1ec41f4575a9a584d3d4407c9a 2024-11-20T19:25:19,751 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/afa6c98285e445ce9a3351d1a898661f is 50, key is test_row_0/C:col10/1732130718382/Put/seqid=0 2024-11-20T19:25:19,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T19:25:19,766 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:19,767 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T19:25:19,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:19,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:19,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:19,768 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:19,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:19,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:19,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741889_1065 (size=12151) 2024-11-20T19:25:19,799 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/afa6c98285e445ce9a3351d1a898661f 2024-11-20T19:25:19,815 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/72b9af5d7a9847c091d658824b71aec9 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/72b9af5d7a9847c091d658824b71aec9 2024-11-20T19:25:19,828 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/72b9af5d7a9847c091d658824b71aec9, entries=200, sequenceid=249, filesize=14.2 K 2024-11-20T19:25:19,832 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/de6a7f1ec41f4575a9a584d3d4407c9a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/de6a7f1ec41f4575a9a584d3d4407c9a 2024-11-20T19:25:19,843 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/de6a7f1ec41f4575a9a584d3d4407c9a, entries=150, sequenceid=249, filesize=11.9 K 2024-11-20T19:25:19,845 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/afa6c98285e445ce9a3351d1a898661f as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/afa6c98285e445ce9a3351d1a898661f 2024-11-20T19:25:19,857 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/afa6c98285e445ce9a3351d1a898661f, entries=150, sequenceid=249, filesize=11.9 K 2024-11-20T19:25:19,860 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 6fb4967d0e6203ca72c498496394ce45 in 274ms, sequenceid=249, compaction requested=true 2024-11-20T19:25:19,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:19,860 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:19,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fb4967d0e6203ca72c498496394ce45:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:19,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:19,863 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 56218 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:19,863 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:19,863 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 6fb4967d0e6203ca72c498496394ce45/A is initiating minor compaction (all files) 2024-11-20T19:25:19,863 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6fb4967d0e6203ca72c498496394ce45/A in TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:19,863 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/83c4b08275c34cbabf20c4a0619da142, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/ab422284e09349eb99e09ae8b54738c0, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/bef4bc06bfb84b6784dac5a582ff67b9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/72b9af5d7a9847c091d658824b71aec9] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp, totalSize=54.9 K 2024-11-20T19:25:19,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fb4967d0e6203ca72c498496394ce45:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:19,864 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 83c4b08275c34cbabf20c4a0619da142, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732130716231 2024-11-20T19:25:19,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:19,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fb4967d0e6203ca72c498496394ce45:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:19,864 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab422284e09349eb99e09ae8b54738c0, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732130716602 2024-11-20T19:25:19,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:19,865 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting bef4bc06bfb84b6784dac5a582ff67b9, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732130717607 2024-11-20T19:25:19,865 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:19,865 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 6fb4967d0e6203ca72c498496394ce45/B is initiating minor compaction (all files) 2024-11-20T19:25:19,865 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6fb4967d0e6203ca72c498496394ce45/B in TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:19,866 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/d5f72ab2071342bdba0979d31ac5be58, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/2da7de442fbe4ba1a1ff1c847da4a815, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/52901557d2ba47c4874bb5585f0c12a2, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/de6a7f1ec41f4575a9a584d3d4407c9a] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp, totalSize=47.9 K 2024-11-20T19:25:19,866 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 72b9af5d7a9847c091d658824b71aec9, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732130718330 2024-11-20T19:25:19,867 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting d5f72ab2071342bdba0979d31ac5be58, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732130716231 2024-11-20T19:25:19,868 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 2da7de442fbe4ba1a1ff1c847da4a815, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732130716606 2024-11-20T19:25:19,869 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 52901557d2ba47c4874bb5585f0c12a2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732130717607 2024-11-20T19:25:19,873 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting de6a7f1ec41f4575a9a584d3d4407c9a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732130718330 2024-11-20T19:25:19,879 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6fb4967d0e6203ca72c498496394ce45 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T19:25:19,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=A 2024-11-20T19:25:19,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:19,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=B 2024-11-20T19:25:19,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:19,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=C 2024-11-20T19:25:19,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:19,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): 
Flush requested on 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:19,901 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/8c207fb65f7849e09a18bb51cb81a803 is 50, key is test_row_0/A:col10/1732130719837/Put/seqid=0 2024-11-20T19:25:19,921 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:19,922 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fb4967d0e6203ca72c498496394ce45#A#compaction#52 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:19,923 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/fb2eae7f5d2b4f55a3d3b76977e0f91d is 50, key is test_row_0/A:col10/1732130718382/Put/seqid=0 2024-11-20T19:25:19,923 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T19:25:19,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:19,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:19,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:19,924 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:19,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:19,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:19,934 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fb4967d0e6203ca72c498496394ce45#B#compaction#53 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:19,935 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/1dd55620b24d424392fbe38e6362360c is 50, key is test_row_0/B:col10/1732130718382/Put/seqid=0 2024-11-20T19:25:19,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741890_1066 (size=12301) 2024-11-20T19:25:19,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:19,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130779980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:20,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741891_1067 (size=12731) 2024-11-20T19:25:20,023 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/fb2eae7f5d2b4f55a3d3b76977e0f91d as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/fb2eae7f5d2b4f55a3d3b76977e0f91d 2024-11-20T19:25:20,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741892_1068 (size=12731) 2024-11-20T19:25:20,041 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6fb4967d0e6203ca72c498496394ce45/A of 6fb4967d0e6203ca72c498496394ce45 into fb2eae7f5d2b4f55a3d3b76977e0f91d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:20,041 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:20,042 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., storeName=6fb4967d0e6203ca72c498496394ce45/A, priority=12, startTime=1732130719860; duration=0sec 2024-11-20T19:25:20,042 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:20,042 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fb4967d0e6203ca72c498496394ce45:A 2024-11-20T19:25:20,042 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:20,047 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:20,047 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 6fb4967d0e6203ca72c498496394ce45/C is initiating minor compaction (all files) 2024-11-20T19:25:20,047 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6fb4967d0e6203ca72c498496394ce45/C in TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:20,047 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/adbd2ca3af0d4a77b4e025af6456a7be, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/9545309004e04918aa5c5d7be9fd4d47, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/f4010a615d034a55b29bbbb6b981765b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/afa6c98285e445ce9a3351d1a898661f] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp, totalSize=47.9 K 2024-11-20T19:25:20,049 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting adbd2ca3af0d4a77b4e025af6456a7be, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732130716231 2024-11-20T19:25:20,050 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9545309004e04918aa5c5d7be9fd4d47, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732130716606 2024-11-20T19:25:20,050 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting f4010a615d034a55b29bbbb6b981765b, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732130717607 2024-11-20T19:25:20,053 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting afa6c98285e445ce9a3351d1a898661f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732130718330 2024-11-20T19:25:20,054 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/1dd55620b24d424392fbe38e6362360c as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/1dd55620b24d424392fbe38e6362360c 2024-11-20T19:25:20,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T19:25:20,077 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:20,078 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6fb4967d0e6203ca72c498496394ce45/B of 6fb4967d0e6203ca72c498496394ce45 into 1dd55620b24d424392fbe38e6362360c(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:20,078 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:20,079 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T19:25:20,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:20,081 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., storeName=6fb4967d0e6203ca72c498496394ce45/B, priority=12, startTime=1732130719863; duration=0sec 2024-11-20T19:25:20,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:20,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:20,081 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:20,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:20,081 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:20,082 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fb4967d0e6203ca72c498496394ce45:B 2024-11-20T19:25:20,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:20,088 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:20,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130780084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:20,113 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fb4967d0e6203ca72c498496394ce45#C#compaction#54 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:20,114 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/1760466cd6e54817a579ff78ed5502ae is 50, key is test_row_0/C:col10/1732130718382/Put/seqid=0 2024-11-20T19:25:20,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741893_1069 (size=12731) 2024-11-20T19:25:20,179 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/1760466cd6e54817a579ff78ed5502ae as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/1760466cd6e54817a579ff78ed5502ae 2024-11-20T19:25:20,191 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6fb4967d0e6203ca72c498496394ce45/C of 6fb4967d0e6203ca72c498496394ce45 into 1760466cd6e54817a579ff78ed5502ae(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:20,191 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:20,191 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., storeName=6fb4967d0e6203ca72c498496394ce45/C, priority=12, startTime=1732130719864; duration=0sec 2024-11-20T19:25:20,191 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:20,192 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fb4967d0e6203ca72c498496394ce45:C 2024-11-20T19:25:20,247 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:20,247 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T19:25:20,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:20,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:20,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:20,248 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:20,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:20,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:20,296 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:20,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130780291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:20,383 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/8c207fb65f7849e09a18bb51cb81a803 2024-11-20T19:25:20,409 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/9ec2488527924a40abdfb8adcff8b529 is 50, key is test_row_0/B:col10/1732130719837/Put/seqid=0 2024-11-20T19:25:20,414 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:20,414 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T19:25:20,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:20,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:20,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:20,415 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:20,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:20,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:20,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741894_1070 (size=12301) 2024-11-20T19:25:20,460 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/9ec2488527924a40abdfb8adcff8b529 2024-11-20T19:25:20,497 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/1e2c17c45b7647bebb23891d7d8b88bd is 50, key is test_row_0/C:col10/1732130719837/Put/seqid=0 2024-11-20T19:25:20,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:20,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42042 deadline: 1732130780505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:20,513 DEBUG [Thread-151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8251 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., hostname=db9c3a6c6492,41229,1732130701496, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:25:20,541 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:20,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42008 deadline: 1732130780540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:20,543 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8280 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., hostname=db9c3a6c6492,41229,1732130701496, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:25:20,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741895_1071 (size=12301) 2024-11-20T19:25:20,547 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/1e2c17c45b7647bebb23891d7d8b88bd 2024-11-20T19:25:20,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/8c207fb65f7849e09a18bb51cb81a803 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/8c207fb65f7849e09a18bb51cb81a803 2024-11-20T19:25:20,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T19:25:20,565 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/8c207fb65f7849e09a18bb51cb81a803, entries=150, sequenceid=275, filesize=12.0 K 2024-11-20T19:25:20,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/9ec2488527924a40abdfb8adcff8b529 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/9ec2488527924a40abdfb8adcff8b529 2024-11-20T19:25:20,574 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/9ec2488527924a40abdfb8adcff8b529, entries=150, sequenceid=275, filesize=12.0 K 2024-11-20T19:25:20,577 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:20,578 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T19:25:20,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:20,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:20,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:20,578 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:20,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:20,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/1e2c17c45b7647bebb23891d7d8b88bd as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/1e2c17c45b7647bebb23891d7d8b88bd 2024-11-20T19:25:20,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
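The block above shows the remote flush procedure (pid=25) being rejected because the region is already flushing: FlushRegionCallable throws the IOException, the region server reports the failure, and the master logs "Remote procedure failed" and re-dispatches the procedure until the in-flight flush completes. The sketch below only illustrates that generic retry-until-accepted pattern; it is not HBase's RSProcedureDispatcher, and the backoff policy and attempt limit are assumptions.

    import java.io.IOException;
    import java.util.concurrent.Callable;

    // Illustrative retry wrapper for the dispatch pattern visible above; not HBase's actual dispatcher.
    public final class RetryingDispatch {
      public static <T> T callWithRetry(Callable<T> op, int maxAttempts, long backoffMs) throws Exception {
        IOException last = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
          try {
            return op.call();
          } catch (IOException e) {            // e.g. "Unable to complete flush ... as already flushing"
            last = e;
            Thread.sleep(backoffMs * attempt); // simple linear backoff; the real dispatcher uses its own policy
          }
        }
        throw last;
      }
    }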
2024-11-20T19:25:20,588 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/1e2c17c45b7647bebb23891d7d8b88bd, entries=150, sequenceid=275, filesize=12.0 K 2024-11-20T19:25:20,590 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 6fb4967d0e6203ca72c498496394ce45 in 711ms, sequenceid=275, compaction requested=false 2024-11-20T19:25:20,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:20,608 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6fb4967d0e6203ca72c498496394ce45 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:25:20,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=A 2024-11-20T19:25:20,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:20,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=B 2024-11-20T19:25:20,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:20,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=C 2024-11-20T19:25:20,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:20,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:20,629 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/2c749935526846dcb7b4a212910da6ab is 50, key is test_row_0/A:col10/1732130719976/Put/seqid=0 2024-11-20T19:25:20,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741896_1072 (size=14741) 2024-11-20T19:25:20,681 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/2c749935526846dcb7b4a212910da6ab 2024-11-20T19:25:20,752 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:20,753 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T19:25:20,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
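The CompactingMemStore/CompactionPipeline messages indicate that the test table's stores use in-memory compaction, so a flush first swaps the active segment into the pipeline before anything is written to disk. As a hedged sketch (the column family name is taken from the log, the policy choice is an assumption), in-memory compaction is normally enabled per column family when the table is declared:

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InMemoryCompactionExample {
      // Builds a descriptor for a table with one family that uses the BASIC in-memory compaction policy.
      static TableDescriptor descriptor() {
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("A"))
            .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(cf)
            .build();
      }
    }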
2024-11-20T19:25:20,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:20,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:20,754 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:20,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:20,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:20,773 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/5a759e67462148469c5b82e2c830ab28 is 50, key is test_row_0/B:col10/1732130719976/Put/seqid=0 2024-11-20T19:25:20,810 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:20,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130780802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:20,811 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4202 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., hostname=db9c3a6c6492,41229,1732130701496, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:25:20,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:20,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130780807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:20,813 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:20,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130780809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:20,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741897_1073 (size=12301) 2024-11-20T19:25:20,818 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/5a759e67462148469c5b82e2c830ab28 2024-11-20T19:25:20,842 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/bdf70134cc9f4a40868112347e2e6ed7 is 50, key is test_row_0/C:col10/1732130719976/Put/seqid=0 2024-11-20T19:25:20,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741898_1074 (size=12301) 2024-11-20T19:25:20,910 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:20,911 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T19:25:20,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:20,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:20,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
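The RegionTooBusyException entries above are the region blocking new mutations because its memstore has exceeded the blocking limit (the flush size multiplied by the block multiplier; the 512 KB limit shown here is a deliberately small test setting). The sketch below only names the relevant configuration keys with illustrative values; the memstore settings are server-side and would normally live in the region servers' hbase-site.xml rather than in client code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreTuningExample {
      static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();
        // Server side: flush threshold and the multiplier that defines the blocking limit
        // (blocking limit = flush.size * block.multiplier). Values below are illustrative only.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024); // 128 MB
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        // Client side: how long the retrying caller keeps retrying RegionTooBusyException.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100); // base pause in ms between retries
        return conf;
      }
    }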
2024-11-20T19:25:20,911 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:20,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:20,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:20,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:20,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130780915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:20,928 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:20,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130780915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:21,065 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:21,065 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T19:25:21,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:21,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:21,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:21,066 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:21,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:21,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:21,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:21,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130781130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:21,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:21,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130781134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:21,219 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:21,219 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T19:25:21,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
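On the client side, the log shows RpcRetryingCallerImpl retrying the writer's put (tries=6 of 16) while the region stays over its memstore limit, the call originating from AcidGuaranteesTestTool's AtomicityWriter. Below is a minimal, hedged sketch of such a put with an extra application-level backoff loop; the row key and qualifier come from the log, the value and retry limits are assumptions, and in practice the built-in retrying caller may absorb the RegionTooBusyException before it ever surfaces to this code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoff {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_1"));                                // row from the log
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v")); // qualifier from the log, value assumed
          for (int attempt = 0; attempt < 5; attempt++) {                                // extra guard on top of client retries
            try {
              table.put(put);
              return;
            } catch (RegionTooBusyException e) {
              Thread.sleep(200L << attempt); // exponential backoff before trying again
            }
          }
          throw new IllegalStateException("region stayed too busy");
        }
      }
    }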
2024-11-20T19:25:21,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:21,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:21,220 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:21,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:21,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:21,293 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/bdf70134cc9f4a40868112347e2e6ed7 2024-11-20T19:25:21,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/2c749935526846dcb7b4a212910da6ab as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/2c749935526846dcb7b4a212910da6ab 2024-11-20T19:25:21,325 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/2c749935526846dcb7b4a212910da6ab, entries=200, sequenceid=289, filesize=14.4 K 2024-11-20T19:25:21,327 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/5a759e67462148469c5b82e2c830ab28 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/5a759e67462148469c5b82e2c830ab28 2024-11-20T19:25:21,335 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/5a759e67462148469c5b82e2c830ab28, entries=150, sequenceid=289, filesize=12.0 K 2024-11-20T19:25:21,337 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/bdf70134cc9f4a40868112347e2e6ed7 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/bdf70134cc9f4a40868112347e2e6ed7 2024-11-20T19:25:21,350 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/bdf70134cc9f4a40868112347e2e6ed7, entries=150, sequenceid=289, filesize=12.0 K 2024-11-20T19:25:21,351 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 6fb4967d0e6203ca72c498496394ce45 in 743ms, sequenceid=289, compaction requested=true 2024-11-20T19:25:21,351 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:21,351 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:21,352 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fb4967d0e6203ca72c498496394ce45:A, priority=-2147483648, 
current under compaction store size is 1 2024-11-20T19:25:21,352 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:21,352 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:21,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fb4967d0e6203ca72c498496394ce45:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:21,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:21,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fb4967d0e6203ca72c498496394ce45:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:21,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:21,354 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39773 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:21,354 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 6fb4967d0e6203ca72c498496394ce45/A is initiating minor compaction (all files) 2024-11-20T19:25:21,354 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6fb4967d0e6203ca72c498496394ce45/A in TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:21,355 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/fb2eae7f5d2b4f55a3d3b76977e0f91d, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/8c207fb65f7849e09a18bb51cb81a803, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/2c749935526846dcb7b4a212910da6ab] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp, totalSize=38.8 K 2024-11-20T19:25:21,355 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:21,355 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting fb2eae7f5d2b4f55a3d3b76977e0f91d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732130718330 2024-11-20T19:25:21,355 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 6fb4967d0e6203ca72c498496394ce45/B is initiating minor compaction (all files) 2024-11-20T19:25:21,355 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6fb4967d0e6203ca72c498496394ce45/B in TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:21,356 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/1dd55620b24d424392fbe38e6362360c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/9ec2488527924a40abdfb8adcff8b529, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/5a759e67462148469c5b82e2c830ab28] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp, totalSize=36.5 K 2024-11-20T19:25:21,356 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c207fb65f7849e09a18bb51cb81a803, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732130719837 2024-11-20T19:25:21,356 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1dd55620b24d424392fbe38e6362360c, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732130718330 2024-11-20T19:25:21,356 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ec2488527924a40abdfb8adcff8b529, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732130719837 2024-11-20T19:25:21,356 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c749935526846dcb7b4a212910da6ab, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732130719907 2024-11-20T19:25:21,357 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5a759e67462148469c5b82e2c830ab28, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732130719940 2024-11-20T19:25:21,372 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:21,374 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T19:25:21,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:21,374 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 6fb4967d0e6203ca72c498496394ce45 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:25:21,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=A 2024-11-20T19:25:21,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:21,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=B 2024-11-20T19:25:21,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:21,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=C 2024-11-20T19:25:21,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:21,382 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fb4967d0e6203ca72c498496394ce45#B#compaction#60 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:21,383 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/e05a5766ae56431bae3e02b698befadd is 50, key is test_row_0/B:col10/1732130719976/Put/seqid=0 2024-11-20T19:25:21,391 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fb4967d0e6203ca72c498496394ce45#A#compaction#61 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:21,396 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/0828581442354dafb84885dff8ad251b is 50, key is test_row_0/A:col10/1732130719976/Put/seqid=0 2024-11-20T19:25:21,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/023c9d0dfb4f4a6a8ed3dca392ae5784 is 50, key is test_row_0/A:col10/1732130720806/Put/seqid=0 2024-11-20T19:25:21,445 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
as already flushing 2024-11-20T19:25:21,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:21,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741899_1075 (size=12983) 2024-11-20T19:25:21,500 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:21,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130781493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:21,505 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/e05a5766ae56431bae3e02b698befadd as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/e05a5766ae56431bae3e02b698befadd 2024-11-20T19:25:21,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741900_1076 (size=12983) 2024-11-20T19:25:21,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:21,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130781500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:21,520 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6fb4967d0e6203ca72c498496394ce45/B of 6fb4967d0e6203ca72c498496394ce45 into e05a5766ae56431bae3e02b698befadd(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:21,520 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:21,520 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., storeName=6fb4967d0e6203ca72c498496394ce45/B, priority=13, startTime=1732130721352; duration=0sec 2024-11-20T19:25:21,521 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:21,521 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fb4967d0e6203ca72c498496394ce45:B 2024-11-20T19:25:21,521 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:21,523 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/0828581442354dafb84885dff8ad251b as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/0828581442354dafb84885dff8ad251b 2024-11-20T19:25:21,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741901_1077 (size=12301) 2024-11-20T19:25:21,524 INFO 
[RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/023c9d0dfb4f4a6a8ed3dca392ae5784 2024-11-20T19:25:21,524 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:21,524 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 6fb4967d0e6203ca72c498496394ce45/C is initiating minor compaction (all files) 2024-11-20T19:25:21,525 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6fb4967d0e6203ca72c498496394ce45/C in TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:21,525 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/1760466cd6e54817a579ff78ed5502ae, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/1e2c17c45b7647bebb23891d7d8b88bd, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/bdf70134cc9f4a40868112347e2e6ed7] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp, totalSize=36.5 K 2024-11-20T19:25:21,527 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1760466cd6e54817a579ff78ed5502ae, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732130718330 2024-11-20T19:25:21,528 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e2c17c45b7647bebb23891d7d8b88bd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732130719837 2024-11-20T19:25:21,528 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting bdf70134cc9f4a40868112347e2e6ed7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732130719940 2024-11-20T19:25:21,532 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6fb4967d0e6203ca72c498496394ce45/A of 6fb4967d0e6203ca72c498496394ce45 into 0828581442354dafb84885dff8ad251b(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:21,533 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:21,533 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., storeName=6fb4967d0e6203ca72c498496394ce45/A, priority=13, startTime=1732130721351; duration=0sec 2024-11-20T19:25:21,533 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:21,533 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fb4967d0e6203ca72c498496394ce45:A 2024-11-20T19:25:21,546 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fb4967d0e6203ca72c498496394ce45#C#compaction#64 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:21,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/05cc33bf50464240a3683946b1531808 is 50, key is test_row_0/B:col10/1732130720806/Put/seqid=0 2024-11-20T19:25:21,547 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/881db46712a54edca531afd673ae3af8 is 50, key is test_row_0/C:col10/1732130719976/Put/seqid=0 2024-11-20T19:25:21,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T19:25:21,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:21,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130781604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:21,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741902_1078 (size=12301) 2024-11-20T19:25:21,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741903_1079 (size=12983) 2024-11-20T19:25:21,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:21,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130781613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:21,634 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/881db46712a54edca531afd673ae3af8 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/881db46712a54edca531afd673ae3af8 2024-11-20T19:25:21,650 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6fb4967d0e6203ca72c498496394ce45/C of 6fb4967d0e6203ca72c498496394ce45 into 881db46712a54edca531afd673ae3af8(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:21,650 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:21,650 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., storeName=6fb4967d0e6203ca72c498496394ce45/C, priority=13, startTime=1732130721353; duration=0sec 2024-11-20T19:25:21,650 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:21,651 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fb4967d0e6203ca72c498496394ce45:C 2024-11-20T19:25:21,812 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:21,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130781808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:21,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:21,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130781820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:22,014 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/05cc33bf50464240a3683946b1531808 2024-11-20T19:25:22,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/8438f8b64a0540239315166e35752c3e is 50, key is test_row_0/C:col10/1732130720806/Put/seqid=0 2024-11-20T19:25:22,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741904_1080 (size=12301) 2024-11-20T19:25:22,089 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/8438f8b64a0540239315166e35752c3e 2024-11-20T19:25:22,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/023c9d0dfb4f4a6a8ed3dca392ae5784 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/023c9d0dfb4f4a6a8ed3dca392ae5784 2024-11-20T19:25:22,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:22,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130782117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:22,122 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/023c9d0dfb4f4a6a8ed3dca392ae5784, entries=150, sequenceid=314, filesize=12.0 K 2024-11-20T19:25:22,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/05cc33bf50464240a3683946b1531808 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/05cc33bf50464240a3683946b1531808 2024-11-20T19:25:22,132 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/05cc33bf50464240a3683946b1531808, entries=150, sequenceid=314, filesize=12.0 K 2024-11-20T19:25:22,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/8438f8b64a0540239315166e35752c3e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/8438f8b64a0540239315166e35752c3e 2024-11-20T19:25:22,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:22,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130782126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:22,152 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/8438f8b64a0540239315166e35752c3e, entries=150, sequenceid=314, filesize=12.0 K 2024-11-20T19:25:22,158 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 6fb4967d0e6203ca72c498496394ce45 in 784ms, sequenceid=314, compaction requested=false 2024-11-20T19:25:22,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:22,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:22,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-11-20T19:25:22,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-11-20T19:25:22,164 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-11-20T19:25:22,164 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7040 sec 2024-11-20T19:25:22,168 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 2.7150 sec 2024-11-20T19:25:22,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:22,646 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6fb4967d0e6203ca72c498496394ce45 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T19:25:22,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=A 2024-11-20T19:25:22,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:22,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=B 2024-11-20T19:25:22,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:22,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=C 2024-11-20T19:25:22,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:22,669 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/84121d90906740cb840bf27caaa2459e is 50, key is test_row_0/A:col10/1732130721482/Put/seqid=0 2024-11-20T19:25:22,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741905_1081 (size=14741) 2024-11-20T19:25:22,733 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/84121d90906740cb840bf27caaa2459e 2024-11-20T19:25:22,761 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/9a4d0cc80078457d8e1e9ca6b5ba6adc is 50, key is test_row_0/B:col10/1732130721482/Put/seqid=0 2024-11-20T19:25:22,762 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:22,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130782758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:22,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:22,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130782763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:22,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741906_1082 (size=12301) 2024-11-20T19:25:22,805 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/9a4d0cc80078457d8e1e9ca6b5ba6adc 2024-11-20T19:25:22,834 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/5fed7ee08a1a42e581ad41b37789a7c2 is 50, key is test_row_0/C:col10/1732130721482/Put/seqid=0 2024-11-20T19:25:22,879 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:22,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130782865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:22,884 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:22,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130782880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:22,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741907_1083 (size=12301) 2024-11-20T19:25:23,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:23,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130783081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:23,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:23,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130783088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:23,294 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/5fed7ee08a1a42e581ad41b37789a7c2 2024-11-20T19:25:23,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/84121d90906740cb840bf27caaa2459e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/84121d90906740cb840bf27caaa2459e 2024-11-20T19:25:23,318 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/84121d90906740cb840bf27caaa2459e, entries=200, sequenceid=329, filesize=14.4 K 2024-11-20T19:25:23,320 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/9a4d0cc80078457d8e1e9ca6b5ba6adc as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/9a4d0cc80078457d8e1e9ca6b5ba6adc 2024-11-20T19:25:23,328 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/9a4d0cc80078457d8e1e9ca6b5ba6adc, entries=150, sequenceid=329, filesize=12.0 K 2024-11-20T19:25:23,330 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/5fed7ee08a1a42e581ad41b37789a7c2 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/5fed7ee08a1a42e581ad41b37789a7c2 2024-11-20T19:25:23,337 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/5fed7ee08a1a42e581ad41b37789a7c2, entries=150, sequenceid=329, filesize=12.0 K 2024-11-20T19:25:23,339 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 6fb4967d0e6203ca72c498496394ce45 in 693ms, sequenceid=329, compaction requested=true 2024-11-20T19:25:23,339 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:23,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fb4967d0e6203ca72c498496394ce45:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:23,339 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:23,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:23,340 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:23,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fb4967d0e6203ca72c498496394ce45:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:23,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:23,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fb4967d0e6203ca72c498496394ce45:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:23,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:23,341 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40025 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:23,341 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 6fb4967d0e6203ca72c498496394ce45/A is initiating minor compaction (all files) 2024-11-20T19:25:23,341 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6fb4967d0e6203ca72c498496394ce45/A in TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:23,342 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/0828581442354dafb84885dff8ad251b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/023c9d0dfb4f4a6a8ed3dca392ae5784, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/84121d90906740cb840bf27caaa2459e] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp, totalSize=39.1 K 2024-11-20T19:25:23,342 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:23,342 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 6fb4967d0e6203ca72c498496394ce45/B is initiating minor compaction (all files) 2024-11-20T19:25:23,342 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6fb4967d0e6203ca72c498496394ce45/B in TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:23,342 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0828581442354dafb84885dff8ad251b, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732130719940 2024-11-20T19:25:23,342 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/e05a5766ae56431bae3e02b698befadd, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/05cc33bf50464240a3683946b1531808, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/9a4d0cc80078457d8e1e9ca6b5ba6adc] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp, totalSize=36.7 K 2024-11-20T19:25:23,342 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 023c9d0dfb4f4a6a8ed3dca392ae5784, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732130720798 2024-11-20T19:25:23,343 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting e05a5766ae56431bae3e02b698befadd, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732130719940 2024-11-20T19:25:23,343 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84121d90906740cb840bf27caaa2459e, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732130721482 2024-11-20T19:25:23,343 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 05cc33bf50464240a3683946b1531808, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732130720798 2024-11-20T19:25:23,344 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 9a4d0cc80078457d8e1e9ca6b5ba6adc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732130721482 2024-11-20T19:25:23,385 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fb4967d0e6203ca72c498496394ce45#A#compaction#69 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:23,386 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/da805af037684089ae43ac3b8e69b7e9 is 50, key is test_row_0/A:col10/1732130721482/Put/seqid=0 2024-11-20T19:25:23,395 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6fb4967d0e6203ca72c498496394ce45 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:25:23,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=A 2024-11-20T19:25:23,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:23,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=B 2024-11-20T19:25:23,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:23,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=C 2024-11-20T19:25:23,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:23,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:23,402 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fb4967d0e6203ca72c498496394ce45#B#compaction#70 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:23,403 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/d44c75dd835247c987a012fa1bc7cd14 is 50, key is test_row_0/B:col10/1732130721482/Put/seqid=0 2024-11-20T19:25:23,436 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/84844fd05cba42b3bcd621943f94f9dd is 50, key is test_row_0/A:col10/1732130722736/Put/seqid=0 2024-11-20T19:25:23,442 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:23,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130783439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:23,444 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:23,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130783441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:23,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741908_1084 (size=13085) 2024-11-20T19:25:23,474 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/da805af037684089ae43ac3b8e69b7e9 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/da805af037684089ae43ac3b8e69b7e9 2024-11-20T19:25:23,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741909_1085 (size=13085) 2024-11-20T19:25:23,485 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6fb4967d0e6203ca72c498496394ce45/A of 6fb4967d0e6203ca72c498496394ce45 into da805af037684089ae43ac3b8e69b7e9(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:23,485 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:23,485 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., storeName=6fb4967d0e6203ca72c498496394ce45/A, priority=13, startTime=1732130723339; duration=0sec 2024-11-20T19:25:23,485 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:23,485 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fb4967d0e6203ca72c498496394ce45:A 2024-11-20T19:25:23,485 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:23,489 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:23,489 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 6fb4967d0e6203ca72c498496394ce45/C is initiating minor compaction (all files) 2024-11-20T19:25:23,489 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6fb4967d0e6203ca72c498496394ce45/C in TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:23,491 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/881db46712a54edca531afd673ae3af8, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/8438f8b64a0540239315166e35752c3e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/5fed7ee08a1a42e581ad41b37789a7c2] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp, totalSize=36.7 K 2024-11-20T19:25:23,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741910_1086 (size=14741) 2024-11-20T19:25:23,493 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 881db46712a54edca531afd673ae3af8, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732130719940 2024-11-20T19:25:23,496 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/84844fd05cba42b3bcd621943f94f9dd 2024-11-20T19:25:23,497 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/d44c75dd835247c987a012fa1bc7cd14 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/d44c75dd835247c987a012fa1bc7cd14 2024-11-20T19:25:23,497 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8438f8b64a0540239315166e35752c3e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732130720798 2024-11-20T19:25:23,500 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5fed7ee08a1a42e581ad41b37789a7c2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732130721482 2024-11-20T19:25:23,510 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6fb4967d0e6203ca72c498496394ce45/B of 6fb4967d0e6203ca72c498496394ce45 into d44c75dd835247c987a012fa1bc7cd14(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:23,510 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:23,511 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., storeName=6fb4967d0e6203ca72c498496394ce45/B, priority=13, startTime=1732130723340; duration=0sec 2024-11-20T19:25:23,511 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:23,511 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fb4967d0e6203ca72c498496394ce45:B 2024-11-20T19:25:23,515 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/fa3da9cca51d4ccf97b2c32e9fcbec85 is 50, key is test_row_0/B:col10/1732130722736/Put/seqid=0 2024-11-20T19:25:23,522 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fb4967d0e6203ca72c498496394ce45#C#compaction#73 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:23,523 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/d2ef9ecddf1d48e697384ad884cbdfcd is 50, key is test_row_0/C:col10/1732130721482/Put/seqid=0 2024-11-20T19:25:23,550 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:23,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130783544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:23,552 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:23,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130783548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:23,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741912_1088 (size=13085) 2024-11-20T19:25:23,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T19:25:23,565 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-11-20T19:25:23,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741911_1087 (size=12301) 2024-11-20T19:25:23,567 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:23,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-11-20T19:25:23,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T19:25:23,570 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:23,572 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:23,572 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:23,573 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/d2ef9ecddf1d48e697384ad884cbdfcd as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/d2ef9ecddf1d48e697384ad884cbdfcd 2024-11-20T19:25:23,586 INFO 
[RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6fb4967d0e6203ca72c498496394ce45/C of 6fb4967d0e6203ca72c498496394ce45 into d2ef9ecddf1d48e697384ad884cbdfcd(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:23,586 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:23,586 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., storeName=6fb4967d0e6203ca72c498496394ce45/C, priority=13, startTime=1732130723340; duration=0sec 2024-11-20T19:25:23,586 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:23,586 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fb4967d0e6203ca72c498496394ce45:C 2024-11-20T19:25:23,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T19:25:23,727 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:23,728 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T19:25:23,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:23,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:23,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:23,729 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:23,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:23,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:23,759 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:23,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130783755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:23,761 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:23,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130783756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:23,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T19:25:23,885 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:23,886 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T19:25:23,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:23,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:23,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:23,886 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:23,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:23,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:23,968 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/fa3da9cca51d4ccf97b2c32e9fcbec85 2024-11-20T19:25:23,985 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/5e868d198ae247b0aef3bcd6b064a58b is 50, key is test_row_0/C:col10/1732130722736/Put/seqid=0 2024-11-20T19:25:24,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741913_1089 (size=12301) 2024-11-20T19:25:24,043 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:24,048 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T19:25:24,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:24,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:24,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:24,048 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:24,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:24,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:24,068 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:24,068 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:24,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130784062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:24,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130784063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:24,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T19:25:24,201 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:24,202 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T19:25:24,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:24,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:24,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:24,203 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:24,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:24,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:24,359 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:24,360 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T19:25:24,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:24,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:24,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:24,360 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:24,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:24,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:24,433 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/5e868d198ae247b0aef3bcd6b064a58b 2024-11-20T19:25:24,440 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/84844fd05cba42b3bcd621943f94f9dd as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/84844fd05cba42b3bcd621943f94f9dd 2024-11-20T19:25:24,453 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/84844fd05cba42b3bcd621943f94f9dd, entries=200, sequenceid=354, filesize=14.4 K 2024-11-20T19:25:24,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/fa3da9cca51d4ccf97b2c32e9fcbec85 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/fa3da9cca51d4ccf97b2c32e9fcbec85 2024-11-20T19:25:24,465 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/fa3da9cca51d4ccf97b2c32e9fcbec85, entries=150, sequenceid=354, filesize=12.0 K 2024-11-20T19:25:24,466 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/5e868d198ae247b0aef3bcd6b064a58b as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/5e868d198ae247b0aef3bcd6b064a58b 2024-11-20T19:25:24,477 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/5e868d198ae247b0aef3bcd6b064a58b, entries=150, sequenceid=354, filesize=12.0 K 2024-11-20T19:25:24,479 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 6fb4967d0e6203ca72c498496394ce45 in 1083ms, sequenceid=354, compaction requested=false 2024-11-20T19:25:24,479 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:24,514 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:24,514 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=27 2024-11-20T19:25:24,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:24,515 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 6fb4967d0e6203ca72c498496394ce45 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:25:24,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=A 2024-11-20T19:25:24,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:24,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=B 2024-11-20T19:25:24,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:24,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=C 2024-11-20T19:25:24,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:24,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/98ba338397814a65bb503252614fc0ab is 50, key is test_row_1/A:col10/1732130723429/Put/seqid=0 2024-11-20T19:25:24,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:24,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:24,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741914_1090 (size=9857) 2024-11-20T19:25:24,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T19:25:24,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:24,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130784691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:24,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:24,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130784698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:24,807 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:24,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130784800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:24,808 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:24,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130784801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:24,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:24,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42022 deadline: 1732130784837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:24,841 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8233 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., hostname=db9c3a6c6492,41229,1732130701496, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:25:24,983 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/98ba338397814a65bb503252614fc0ab 2024-11-20T19:25:25,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/c683c394252d438ca276ba3dd3482baf is 50, key is test_row_1/B:col10/1732130723429/Put/seqid=0 2024-11-20T19:25:25,014 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:25,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130785011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:25,015 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:25,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130785011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:25,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741915_1091 (size=9857) 2024-11-20T19:25:25,057 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/c683c394252d438ca276ba3dd3482baf 2024-11-20T19:25:25,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/38dbd65bd82d498c985c9bb4675c26f6 is 50, key is test_row_1/C:col10/1732130723429/Put/seqid=0 2024-11-20T19:25:25,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741916_1092 (size=9857) 2024-11-20T19:25:25,124 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/38dbd65bd82d498c985c9bb4675c26f6 2024-11-20T19:25:25,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/98ba338397814a65bb503252614fc0ab as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/98ba338397814a65bb503252614fc0ab 2024-11-20T19:25:25,145 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/98ba338397814a65bb503252614fc0ab, entries=100, sequenceid=368, filesize=9.6 K 2024-11-20T19:25:25,146 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/c683c394252d438ca276ba3dd3482baf as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/c683c394252d438ca276ba3dd3482baf 2024-11-20T19:25:25,157 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/c683c394252d438ca276ba3dd3482baf, entries=100, sequenceid=368, filesize=9.6 K 2024-11-20T19:25:25,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/38dbd65bd82d498c985c9bb4675c26f6 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/38dbd65bd82d498c985c9bb4675c26f6 2024-11-20T19:25:25,167 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/38dbd65bd82d498c985c9bb4675c26f6, entries=100, sequenceid=368, filesize=9.6 K 2024-11-20T19:25:25,169 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 6fb4967d0e6203ca72c498496394ce45 in 654ms, sequenceid=368, compaction requested=true 2024-11-20T19:25:25,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:25,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:25,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-11-20T19:25:25,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-11-20T19:25:25,173 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-11-20T19:25:25,173 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5990 sec 2024-11-20T19:25:25,175 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 1.6070 sec 2024-11-20T19:25:25,321 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6fb4967d0e6203ca72c498496394ce45 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T19:25:25,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:25,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=A 2024-11-20T19:25:25,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:25,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=B 2024-11-20T19:25:25,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:25,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=C 2024-11-20T19:25:25,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:25,357 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/4d900035cc2c42c69a4291ce00d858c5 is 50, key is test_row_0/A:col10/1732130725319/Put/seqid=0 2024-11-20T19:25:25,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:25,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:25,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130785352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:25,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 274 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130785357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:25,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741917_1093 (size=14741) 2024-11-20T19:25:25,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:25,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130785463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:25,465 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:25,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130785464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:25,673 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:25,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130785667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:25,674 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:25,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130785667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:25,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T19:25:25,677 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-11-20T19:25:25,680 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:25,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-11-20T19:25:25,683 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:25,684 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:25,684 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:25,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T19:25:25,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T19:25:25,811 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=396 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/4d900035cc2c42c69a4291ce00d858c5 2024-11-20T19:25:25,837 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:25,840 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/1e7c1affb01d45cfa65b903ea1bcf704 is 50, key is test_row_0/B:col10/1732130725319/Put/seqid=0 2024-11-20T19:25:25,842 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T19:25:25,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:25,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:25,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:25,843 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:25,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:25,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:25,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741918_1094 (size=12301) 2024-11-20T19:25:25,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:25,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130785976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:25,987 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:25,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 280 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130785978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:25,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T19:25:25,997 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:25,998 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T19:25:25,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:25,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:25,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:25,998 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
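[Editorial note, not part of the captured log: the pid=28 FlushTableProcedure above was submitted through the Admin flush API while a memstore flush of the same region was still in progress, which is why every FlushRegionCallable dispatch is rejected with "NOT flushing ... as already flushing" and re-sent. A minimal, hypothetical sketch of the client side of such a request follows; the table name is taken from the log, all connection settings are assumptions, and this is not the test's actual code.]

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        // Connection settings come from hbase-site.xml on the classpath (assumption).
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Submits a flush of the table on the master (a FlushTableProcedure like
            // pid=26/28 in the log); in this build the admin client then waits on the
            // returned future until the procedure is reported done.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```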
2024-11-20T19:25:25,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:26,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:26,151 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:26,152 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T19:25:26,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:26,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:26,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:26,153 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:26,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:26,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:26,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T19:25:26,313 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:26,314 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T19:25:26,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:26,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:26,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:26,315 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:26,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:26,316 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=396 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/1e7c1affb01d45cfa65b903ea1bcf704 2024-11-20T19:25:26,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:26,349 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/58dfde063b1d43528788e732ac5c7f00 is 50, key is test_row_0/C:col10/1732130725319/Put/seqid=0 2024-11-20T19:25:26,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741919_1095 (size=12301) 2024-11-20T19:25:26,474 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:26,474 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T19:25:26,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:26,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:26,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:26,475 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:26,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:26,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:26,494 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:26,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 282 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130786490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:26,498 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:26,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130786497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:26,628 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:26,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T19:25:26,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:26,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:26,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:26,629 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
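[Editorial note, not part of the captured log: the recurring RegionTooBusyException entries are HRegion.checkResources refusing new mutations while the region's memstore is above its blocking limit (512.0 K here) until the in-flight flush drains it. The sketch below is a hedged, hypothetical client-side way to back off on that condition; the table, family, row, retry counts, and the assumption that the exception surfaces to the caller instead of being absorbed by the client's own retry logic are all illustrative.]

```java
import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {

    /** Retries a put with exponential backoff while the region reports it is over its memstore limit. */
    static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
        long pauseMs = 100;
        for (int attempt = 0; attempt < 5; attempt++) {
            try {
                table.put(put);
                return;                                   // write accepted
            } catch (IOException e) {
                if (!hasCause(e, RegionTooBusyException.class)) {
                    throw e;                              // some other failure, do not retry here
                }
                Thread.sleep(pauseMs);                    // give MemStoreFlusher time to drain the memstore
                pauseMs = Math.min(pauseMs * 2, 5_000);
            }
        }
        throw new IOException("region stayed over its memstore limit after retries");
    }

    private static boolean hasCause(Throwable t, Class<? extends Throwable> type) {
        for (Throwable cur = t; cur != null; cur = cur.getCause()) {
            if (type.isInstance(cur)) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
            putWithBackoff(table, put);
        }
    }
}
```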
2024-11-20T19:25:26,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:26,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:26,780 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=396 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/58dfde063b1d43528788e732ac5c7f00 2024-11-20T19:25:26,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/4d900035cc2c42c69a4291ce00d858c5 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/4d900035cc2c42c69a4291ce00d858c5 2024-11-20T19:25:26,790 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:26,791 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T19:25:26,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:26,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:26,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:26,791 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:26,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:26,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T19:25:26,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:26,797 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/4d900035cc2c42c69a4291ce00d858c5, entries=200, sequenceid=396, filesize=14.4 K 2024-11-20T19:25:26,802 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/1e7c1affb01d45cfa65b903ea1bcf704 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/1e7c1affb01d45cfa65b903ea1bcf704 2024-11-20T19:25:26,813 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/1e7c1affb01d45cfa65b903ea1bcf704, entries=150, sequenceid=396, filesize=12.0 K 2024-11-20T19:25:26,821 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/58dfde063b1d43528788e732ac5c7f00 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/58dfde063b1d43528788e732ac5c7f00 2024-11-20T19:25:26,828 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/58dfde063b1d43528788e732ac5c7f00, entries=150, sequenceid=396, filesize=12.0 K 2024-11-20T19:25:26,830 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 6fb4967d0e6203ca72c498496394ce45 in 1509ms, sequenceid=396, compaction requested=true 2024-11-20T19:25:26,830 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:26,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fb4967d0e6203ca72c498496394ce45:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:26,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:26,831 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:26,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fb4967d0e6203ca72c498496394ce45:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:26,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:26,831 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:26,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fb4967d0e6203ca72c498496394ce45:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:26,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:26,833 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47544 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:26,833 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 6fb4967d0e6203ca72c498496394ce45/B is initiating minor compaction (all files) 2024-11-20T19:25:26,834 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6fb4967d0e6203ca72c498496394ce45/B in TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:26,834 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/d44c75dd835247c987a012fa1bc7cd14, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/fa3da9cca51d4ccf97b2c32e9fcbec85, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/c683c394252d438ca276ba3dd3482baf, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/1e7c1affb01d45cfa65b903ea1bcf704] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp, totalSize=46.4 K 2024-11-20T19:25:26,834 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52424 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:26,834 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 6fb4967d0e6203ca72c498496394ce45/A is initiating minor compaction (all files) 2024-11-20T19:25:26,834 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6fb4967d0e6203ca72c498496394ce45/A in TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:26,834 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/da805af037684089ae43ac3b8e69b7e9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/84844fd05cba42b3bcd621943f94f9dd, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/98ba338397814a65bb503252614fc0ab, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/4d900035cc2c42c69a4291ce00d858c5] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp, totalSize=51.2 K 2024-11-20T19:25:26,835 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting d44c75dd835247c987a012fa1bc7cd14, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732130721482 2024-11-20T19:25:26,835 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting da805af037684089ae43ac3b8e69b7e9, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732130721482 2024-11-20T19:25:26,836 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84844fd05cba42b3bcd621943f94f9dd, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=354, 
earliestPutTs=1732130722733 2024-11-20T19:25:26,836 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting fa3da9cca51d4ccf97b2c32e9fcbec85, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1732130722736 2024-11-20T19:25:26,837 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 98ba338397814a65bb503252614fc0ab, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1732130723429 2024-11-20T19:25:26,837 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting c683c394252d438ca276ba3dd3482baf, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1732130723429 2024-11-20T19:25:26,838 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4d900035cc2c42c69a4291ce00d858c5, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1732130724653 2024-11-20T19:25:26,838 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e7c1affb01d45cfa65b903ea1bcf704, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1732130724680 2024-11-20T19:25:26,873 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fb4967d0e6203ca72c498496394ce45#B#compaction#81 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:26,874 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/e548380cc5d14494a392d42412b67f6f is 50, key is test_row_0/B:col10/1732130725319/Put/seqid=0 2024-11-20T19:25:26,884 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fb4967d0e6203ca72c498496394ce45#A#compaction#82 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:26,885 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/eddc074391f444fd872166707a47960d is 50, key is test_row_0/A:col10/1732130725319/Put/seqid=0 2024-11-20T19:25:26,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741920_1096 (size=13221) 2024-11-20T19:25:26,945 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:26,945 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T19:25:26,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:26,946 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing 6fb4967d0e6203ca72c498496394ce45 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T19:25:26,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=A 2024-11-20T19:25:26,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:26,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=B 2024-11-20T19:25:26,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:26,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=C 2024-11-20T19:25:26,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:26,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741921_1097 (size=13221) 2024-11-20T19:25:26,961 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/eddc074391f444fd872166707a47960d as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/eddc074391f444fd872166707a47960d 2024-11-20T19:25:26,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 
{event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/e83771e664d741ec89395031ca92126d is 50, key is test_row_0/A:col10/1732130725339/Put/seqid=0 2024-11-20T19:25:26,980 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6fb4967d0e6203ca72c498496394ce45/A of 6fb4967d0e6203ca72c498496394ce45 into eddc074391f444fd872166707a47960d(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:26,980 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:26,980 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., storeName=6fb4967d0e6203ca72c498496394ce45/A, priority=12, startTime=1732130726830; duration=0sec 2024-11-20T19:25:26,980 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:26,980 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fb4967d0e6203ca72c498496394ce45:A 2024-11-20T19:25:26,980 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:26,983 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47544 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:26,983 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 6fb4967d0e6203ca72c498496394ce45/C is initiating minor compaction (all files) 2024-11-20T19:25:26,983 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6fb4967d0e6203ca72c498496394ce45/C in TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:26,983 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/d2ef9ecddf1d48e697384ad884cbdfcd, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/5e868d198ae247b0aef3bcd6b064a58b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/38dbd65bd82d498c985c9bb4675c26f6, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/58dfde063b1d43528788e732ac5c7f00] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp, totalSize=46.4 K 2024-11-20T19:25:26,984 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting d2ef9ecddf1d48e697384ad884cbdfcd, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732130721482 2024-11-20T19:25:26,984 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e868d198ae247b0aef3bcd6b064a58b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1732130722736 2024-11-20T19:25:26,987 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38dbd65bd82d498c985c9bb4675c26f6, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1732130723429 2024-11-20T19:25:26,988 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 58dfde063b1d43528788e732ac5c7f00, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1732130724680 2024-11-20T19:25:27,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741922_1098 (size=12301) 2024-11-20T19:25:27,017 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=404 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/e83771e664d741ec89395031ca92126d 2024-11-20T19:25:27,019 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fb4967d0e6203ca72c498496394ce45#C#compaction#84 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:27,020 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/156c8568892b4c0ba685d27aef190839 is 50, key is test_row_0/C:col10/1732130725319/Put/seqid=0 2024-11-20T19:25:27,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/69c3f41331104ed28eba4b8537c8f059 is 50, key is test_row_0/B:col10/1732130725339/Put/seqid=0 2024-11-20T19:25:27,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741923_1099 (size=13221) 2024-11-20T19:25:27,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741924_1100 (size=12301) 2024-11-20T19:25:27,126 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=404 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/69c3f41331104ed28eba4b8537c8f059 2024-11-20T19:25:27,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/c82e2953125c4333b21270c7cb8422e2 is 50, key is test_row_0/C:col10/1732130725339/Put/seqid=0 2024-11-20T19:25:27,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741925_1101 (size=12301) 2024-11-20T19:25:27,346 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/e548380cc5d14494a392d42412b67f6f as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/e548380cc5d14494a392d42412b67f6f 2024-11-20T19:25:27,365 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6fb4967d0e6203ca72c498496394ce45/B of 6fb4967d0e6203ca72c498496394ce45 into e548380cc5d14494a392d42412b67f6f(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:27,365 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:27,365 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., storeName=6fb4967d0e6203ca72c498496394ce45/B, priority=12, startTime=1732130726831; duration=0sec 2024-11-20T19:25:27,366 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:27,366 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fb4967d0e6203ca72c498496394ce45:B 2024-11-20T19:25:27,507 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/156c8568892b4c0ba685d27aef190839 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/156c8568892b4c0ba685d27aef190839 2024-11-20T19:25:27,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:27,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:27,528 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6fb4967d0e6203ca72c498496394ce45/C of 6fb4967d0e6203ca72c498496394ce45 into 156c8568892b4c0ba685d27aef190839(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:27,530 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:27,532 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., storeName=6fb4967d0e6203ca72c498496394ce45/C, priority=12, startTime=1732130726831; duration=0sec 2024-11-20T19:25:27,532 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:27,532 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fb4967d0e6203ca72c498496394ce45:C 2024-11-20T19:25:27,597 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=404 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/c82e2953125c4333b21270c7cb8422e2 2024-11-20T19:25:27,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/e83771e664d741ec89395031ca92126d as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/e83771e664d741ec89395031ca92126d 2024-11-20T19:25:27,621 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/e83771e664d741ec89395031ca92126d, entries=150, sequenceid=404, filesize=12.0 K 2024-11-20T19:25:27,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/69c3f41331104ed28eba4b8537c8f059 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/69c3f41331104ed28eba4b8537c8f059 2024-11-20T19:25:27,635 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/69c3f41331104ed28eba4b8537c8f059, entries=150, sequenceid=404, filesize=12.0 K 2024-11-20T19:25:27,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/c82e2953125c4333b21270c7cb8422e2 
as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/c82e2953125c4333b21270c7cb8422e2 2024-11-20T19:25:27,649 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/c82e2953125c4333b21270c7cb8422e2, entries=150, sequenceid=404, filesize=12.0 K 2024-11-20T19:25:27,651 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=161.02 KB/164880 for 6fb4967d0e6203ca72c498496394ce45 in 704ms, sequenceid=404, compaction requested=false 2024-11-20T19:25:27,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:27,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:27,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-20T19:25:27,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-11-20T19:25:27,656 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-11-20T19:25:27,656 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9690 sec 2024-11-20T19:25:27,659 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 1.9760 sec 2024-11-20T19:25:27,660 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6fb4967d0e6203ca72c498496394ce45 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-20T19:25:27,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=A 2024-11-20T19:25:27,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:27,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=B 2024-11-20T19:25:27,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:27,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=C 2024-11-20T19:25:27,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:27,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:27,669 
DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/c91433b27c9e444f9a1a5a5f81f278ca is 50, key is test_row_0/A:col10/1732130727658/Put/seqid=0 2024-11-20T19:25:27,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:27,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 300 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130787677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:27,690 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:27,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130787686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:27,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741926_1102 (size=14741) 2024-11-20T19:25:27,716 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/c91433b27c9e444f9a1a5a5f81f278ca 2024-11-20T19:25:27,740 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/a8a19fa5e9d94a339d8c90c225b0385a is 50, key is test_row_0/B:col10/1732130727658/Put/seqid=0 2024-11-20T19:25:27,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741927_1103 (size=12301) 2024-11-20T19:25:27,784 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/a8a19fa5e9d94a339d8c90c225b0385a 2024-11-20T19:25:27,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:27,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 302 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130787784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:27,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T19:25:27,795 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-11-20T19:25:27,797 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:27,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-11-20T19:25:27,799 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:27,801 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:27,801 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:27,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T19:25:27,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:27,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130787798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:27,812 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/38b5804e9ed8482aaa61f142aff7579d is 50, key is test_row_0/C:col10/1732130727658/Put/seqid=0 2024-11-20T19:25:27,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741928_1104 (size=12301) 2024-11-20T19:25:27,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T19:25:27,958 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:27,959 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T19:25:27,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:27,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:27,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:27,959 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:27,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:27,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:27,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:27,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 304 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130787995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:28,010 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:28,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42036 deadline: 1732130788009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:28,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T19:25:28,116 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:28,116 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T19:25:28,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:28,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:28,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:28,117 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:28,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,270 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:28,271 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T19:25:28,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:28,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:28,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:28,271 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:28,275 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/38b5804e9ed8482aaa61f142aff7579d 2024-11-20T19:25:28,284 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/c91433b27c9e444f9a1a5a5f81f278ca as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/c91433b27c9e444f9a1a5a5f81f278ca 2024-11-20T19:25:28,292 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/c91433b27c9e444f9a1a5a5f81f278ca, entries=200, sequenceid=435, filesize=14.4 K 2024-11-20T19:25:28,294 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/a8a19fa5e9d94a339d8c90c225b0385a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/a8a19fa5e9d94a339d8c90c225b0385a 2024-11-20T19:25:28,301 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:28,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 306 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130788299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:28,307 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/a8a19fa5e9d94a339d8c90c225b0385a, entries=150, sequenceid=435, filesize=12.0 K 2024-11-20T19:25:28,308 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/38b5804e9ed8482aaa61f142aff7579d as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/38b5804e9ed8482aaa61f142aff7579d 2024-11-20T19:25:28,320 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/38b5804e9ed8482aaa61f142aff7579d, entries=150, sequenceid=435, filesize=12.0 K 2024-11-20T19:25:28,322 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 6fb4967d0e6203ca72c498496394ce45 in 662ms, sequenceid=435, compaction requested=true 2024-11-20T19:25:28,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:28,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fb4967d0e6203ca72c498496394ce45:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:28,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:28,323 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:28,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fb4967d0e6203ca72c498496394ce45:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:28,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), 
splitQueue=0 2024-11-20T19:25:28,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fb4967d0e6203ca72c498496394ce45:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:28,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T19:25:28,323 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:28,327 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40263 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:28,327 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 6fb4967d0e6203ca72c498496394ce45/A is initiating minor compaction (all files) 2024-11-20T19:25:28,327 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6fb4967d0e6203ca72c498496394ce45/A in TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:28,327 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/eddc074391f444fd872166707a47960d, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/e83771e664d741ec89395031ca92126d, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/c91433b27c9e444f9a1a5a5f81f278ca] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp, totalSize=39.3 K 2024-11-20T19:25:28,327 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:28,328 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 6fb4967d0e6203ca72c498496394ce45/B is initiating minor compaction (all files) 2024-11-20T19:25:28,328 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6fb4967d0e6203ca72c498496394ce45/B in TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:28,328 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/e548380cc5d14494a392d42412b67f6f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/69c3f41331104ed28eba4b8537c8f059, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/a8a19fa5e9d94a339d8c90c225b0385a] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp, totalSize=36.9 K 2024-11-20T19:25:28,329 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting e548380cc5d14494a392d42412b67f6f, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1732130724680 2024-11-20T19:25:28,332 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 69c3f41331104ed28eba4b8537c8f059, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=404, earliestPutTs=1732130725328 2024-11-20T19:25:28,332 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting eddc074391f444fd872166707a47960d, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1732130724680 2024-11-20T19:25:28,333 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting e83771e664d741ec89395031ca92126d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=404, earliestPutTs=1732130725328 2024-11-20T19:25:28,333 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting a8a19fa5e9d94a339d8c90c225b0385a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732130727574 2024-11-20T19:25:28,334 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting c91433b27c9e444f9a1a5a5f81f278ca, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732130727574 2024-11-20T19:25:28,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:28,335 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6fb4967d0e6203ca72c498496394ce45 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:25:28,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=A 2024-11-20T19:25:28,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:28,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=B 2024-11-20T19:25:28,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:28,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=C 
2024-11-20T19:25:28,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:28,349 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/ab8ea8b1dd7743ca8c7b58870cd9efaf is 50, key is test_row_0/A:col10/1732130728334/Put/seqid=0 2024-11-20T19:25:28,355 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fb4967d0e6203ca72c498496394ce45#A#compaction#91 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:28,357 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/95e72c140a5c4b4ca342ffff876b5ebb is 50, key is test_row_0/A:col10/1732130727658/Put/seqid=0 2024-11-20T19:25:28,367 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fb4967d0e6203ca72c498496394ce45#B#compaction#92 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:28,367 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/fd45133542f848ae8bf77f1fcb24da7e is 50, key is test_row_0/B:col10/1732130727658/Put/seqid=0 2024-11-20T19:25:28,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T19:25:28,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741929_1105 (size=14741) 2024-11-20T19:25:28,424 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:28,424 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T19:25:28,424 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=446 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/ab8ea8b1dd7743ca8c7b58870cd9efaf 2024-11-20T19:25:28,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:28,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
as already flushing 2024-11-20T19:25:28,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:28,425 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741930_1106 (size=13323) 2024-11-20T19:25:28,454 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/95e72c140a5c4b4ca342ffff876b5ebb as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/95e72c140a5c4b4ca342ffff876b5ebb 2024-11-20T19:25:28,471 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6fb4967d0e6203ca72c498496394ce45/A of 6fb4967d0e6203ca72c498496394ce45 into 95e72c140a5c4b4ca342ffff876b5ebb(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:28,471 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:28,471 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., storeName=6fb4967d0e6203ca72c498496394ce45/A, priority=13, startTime=1732130728322; duration=0sec 2024-11-20T19:25:28,471 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:28,471 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fb4967d0e6203ca72c498496394ce45:A 2024-11-20T19:25:28,471 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:28,474 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:28,474 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 6fb4967d0e6203ca72c498496394ce45/C is initiating minor compaction (all files) 2024-11-20T19:25:28,474 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6fb4967d0e6203ca72c498496394ce45/C in TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:28,474 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/156c8568892b4c0ba685d27aef190839, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/c82e2953125c4333b21270c7cb8422e2, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/38b5804e9ed8482aaa61f142aff7579d] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp, totalSize=36.9 K 2024-11-20T19:25:28,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741931_1107 (size=13323) 2024-11-20T19:25:28,475 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 156c8568892b4c0ba685d27aef190839, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1732130724680 2024-11-20T19:25:28,475 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting c82e2953125c4333b21270c7cb8422e2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=404, earliestPutTs=1732130725328 2024-11-20T19:25:28,476 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38b5804e9ed8482aaa61f142aff7579d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=435, earliestPutTs=1732130727574 2024-11-20T19:25:28,483 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/d2cd77b50a614795ab27459764c19911 is 50, key is test_row_0/B:col10/1732130728334/Put/seqid=0 2024-11-20T19:25:28,498 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fb4967d0e6203ca72c498496394ce45#C#compaction#94 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:28,499 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/92b2e09a373c47559cfd3512b7ce2a74 is 50, key is test_row_0/C:col10/1732130727658/Put/seqid=0 2024-11-20T19:25:28,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741933_1109 (size=13323) 2024-11-20T19:25:28,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741932_1108 (size=12301) 2024-11-20T19:25:28,534 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=446 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/d2cd77b50a614795ab27459764c19911 2024-11-20T19:25:28,540 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x10c964e8 to 127.0.0.1:49985 2024-11-20T19:25:28,540 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:25:28,550 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c8de680 to 127.0.0.1:49985 2024-11-20T19:25:28,550 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:25:28,552 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x04977266 to 127.0.0.1:49985 2024-11-20T19:25:28,553 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:25:28,555 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/92b2e09a373c47559cfd3512b7ce2a74 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/92b2e09a373c47559cfd3512b7ce2a74 2024-11-20T19:25:28,558 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x527c6d40 to 127.0.0.1:49985 2024-11-20T19:25:28,558 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:25:28,562 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/787dec72e95d4cb7bf2ca25c7cfe3d81 is 50, key is 
test_row_0/C:col10/1732130728334/Put/seqid=0 2024-11-20T19:25:28,570 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72e97e4b to 127.0.0.1:49985 2024-11-20T19:25:28,570 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:25:28,574 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6fb4967d0e6203ca72c498496394ce45/C of 6fb4967d0e6203ca72c498496394ce45 into 92b2e09a373c47559cfd3512b7ce2a74(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:28,574 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:28,574 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., storeName=6fb4967d0e6203ca72c498496394ce45/C, priority=13, startTime=1732130728323; duration=0sec 2024-11-20T19:25:28,574 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:28,574 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fb4967d0e6203ca72c498496394ce45:C 2024-11-20T19:25:28,578 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:28,578 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T19:25:28,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:28,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:28,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:28,579 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741934_1110 (size=12301) 2024-11-20T19:25:28,734 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:28,735 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T19:25:28,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:28,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:28,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:28,736 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:28,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,805 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:28,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 308 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42054 deadline: 1732130788805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:28,889 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:28,889 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T19:25:28,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:28,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:28,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:28,890 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,896 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/fd45133542f848ae8bf77f1fcb24da7e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/fd45133542f848ae8bf77f1fcb24da7e 2024-11-20T19:25:28,904 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6fb4967d0e6203ca72c498496394ce45/B of 6fb4967d0e6203ca72c498496394ce45 into fd45133542f848ae8bf77f1fcb24da7e(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:28,904 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:28,905 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45., storeName=6fb4967d0e6203ca72c498496394ce45/B, priority=13, startTime=1732130728323; duration=0sec 2024-11-20T19:25:28,905 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:28,905 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fb4967d0e6203ca72c498496394ce45:B 2024-11-20T19:25:28,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T19:25:28,993 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=446 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/787dec72e95d4cb7bf2ca25c7cfe3d81 2024-11-20T19:25:29,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/ab8ea8b1dd7743ca8c7b58870cd9efaf as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/ab8ea8b1dd7743ca8c7b58870cd9efaf 2024-11-20T19:25:29,009 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/ab8ea8b1dd7743ca8c7b58870cd9efaf, entries=200, sequenceid=446, filesize=14.4 K 2024-11-20T19:25:29,010 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/d2cd77b50a614795ab27459764c19911 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/d2cd77b50a614795ab27459764c19911 2024-11-20T19:25:29,018 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/d2cd77b50a614795ab27459764c19911, entries=150, sequenceid=446, filesize=12.0 K 2024-11-20T19:25:29,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/787dec72e95d4cb7bf2ca25c7cfe3d81 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/787dec72e95d4cb7bf2ca25c7cfe3d81 2024-11-20T19:25:29,027 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/787dec72e95d4cb7bf2ca25c7cfe3d81, entries=150, sequenceid=446, filesize=12.0 K 2024-11-20T19:25:29,032 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 6fb4967d0e6203ca72c498496394ce45 in 697ms, sequenceid=446, compaction requested=false 2024-11-20T19:25:29,032 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:29,044 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:29,045 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T19:25:29,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:29,046 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing 6fb4967d0e6203ca72c498496394ce45 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:25:29,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=A 2024-11-20T19:25:29,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:29,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=B 2024-11-20T19:25:29,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:29,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=C 2024-11-20T19:25:29,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:29,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/bccc0791273e4535bf91f66bd65b01ef is 50, key is test_row_0/A:col10/1732130728549/Put/seqid=0 2024-11-20T19:25:29,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741935_1111 (size=12301) 2024-11-20T19:25:29,470 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 
{event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/bccc0791273e4535bf91f66bd65b01ef 2024-11-20T19:25:29,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/c52ce36c6f22414c9e191378db56859a is 50, key is test_row_0/B:col10/1732130728549/Put/seqid=0 2024-11-20T19:25:29,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741936_1112 (size=12301) 2024-11-20T19:25:29,773 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T19:25:29,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:29,812 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. as already flushing 2024-11-20T19:25:29,812 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5095ba91 to 127.0.0.1:49985 2024-11-20T19:25:29,812 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:25:29,887 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/c52ce36c6f22414c9e191378db56859a 2024-11-20T19:25:29,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/a35868935bcd4e76837f31c42ed8f687 is 50, key is test_row_0/C:col10/1732130728549/Put/seqid=0 2024-11-20T19:25:29,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T19:25:29,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741937_1113 (size=12301) 2024-11-20T19:25:30,313 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/a35868935bcd4e76837f31c42ed8f687 2024-11-20T19:25:30,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/bccc0791273e4535bf91f66bd65b01ef as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/bccc0791273e4535bf91f66bd65b01ef 2024-11-20T19:25:30,338 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/bccc0791273e4535bf91f66bd65b01ef, entries=150, sequenceid=474, filesize=12.0 K 2024-11-20T19:25:30,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/c52ce36c6f22414c9e191378db56859a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/c52ce36c6f22414c9e191378db56859a 2024-11-20T19:25:30,346 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/c52ce36c6f22414c9e191378db56859a, entries=150, sequenceid=474, filesize=12.0 K 2024-11-20T19:25:30,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/a35868935bcd4e76837f31c42ed8f687 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/a35868935bcd4e76837f31c42ed8f687 2024-11-20T19:25:30,359 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/a35868935bcd4e76837f31c42ed8f687, entries=150, sequenceid=474, filesize=12.0 K 2024-11-20T19:25:30,360 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=6.71 KB/6870 for 6fb4967d0e6203ca72c498496394ce45 in 1315ms, sequenceid=474, compaction requested=true 2024-11-20T19:25:30,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:30,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:30,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-20T19:25:30,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-11-20T19:25:30,363 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-11-20T19:25:30,363 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5600 sec 2024-11-20T19:25:30,365 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 2.5670 sec 2024-11-20T19:25:30,588 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x12885408 to 127.0.0.1:49985 2024-11-20T19:25:30,588 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:25:30,613 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x62c43377 to 127.0.0.1:49985 2024-11-20T19:25:30,613 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:25:31,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T19:25:31,909 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-11-20T19:25:34,855 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5a8f4734 to 127.0.0.1:49985 2024-11-20T19:25:34,855 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:25:34,855 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-20T19:25:34,855 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 180 2024-11-20T19:25:34,855 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 20 2024-11-20T19:25:34,855 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 19 2024-11-20T19:25:34,856 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 118 2024-11-20T19:25:34,856 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 40 2024-11-20T19:25:34,856 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T19:25:34,856 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2664 2024-11-20T19:25:34,856 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2490 2024-11-20T19:25:34,856 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T19:25:34,856 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1257 2024-11-20T19:25:34,856 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3767 rows 2024-11-20T19:25:34,856 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1264 2024-11-20T19:25:34,856 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3791 rows 2024-11-20T19:25:34,856 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T19:25:34,856 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e67f019 to 127.0.0.1:49985 2024-11-20T19:25:34,856 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:25:34,859 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T19:25:34,863 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T19:25:34,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T19:25:34,868 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130734868"}]},"ts":"1732130734868"} 2024-11-20T19:25:34,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T19:25:34,869 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T19:25:34,913 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T19:25:34,915 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T19:25:34,920 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6fb4967d0e6203ca72c498496394ce45, UNASSIGN}] 2024-11-20T19:25:34,921 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=6fb4967d0e6203ca72c498496394ce45, UNASSIGN 2024-11-20T19:25:34,922 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=6fb4967d0e6203ca72c498496394ce45, regionState=CLOSING, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:34,924 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T19:25:34,924 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE; CloseRegionProcedure 6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496}] 2024-11-20T19:25:34,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T19:25:35,083 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:35,086 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(124): Close 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:35,087 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T19:25:35,088 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1681): Closing 6fb4967d0e6203ca72c498496394ce45, disabling compactions & flushes 2024-11-20T19:25:35,088 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:35,088 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:35,088 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. after waiting 0 ms 2024-11-20T19:25:35,088 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 
2024-11-20T19:25:35,088 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(2837): Flushing 6fb4967d0e6203ca72c498496394ce45 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-20T19:25:35,088 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=A 2024-11-20T19:25:35,089 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:35,089 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=B 2024-11-20T19:25:35,089 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:35,089 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6fb4967d0e6203ca72c498496394ce45, store=C 2024-11-20T19:25:35,089 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:35,095 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/5c4b9c2798454cb7927cca64e5f8d296 is 50, key is test_row_1/A:col10/1732130734853/Put/seqid=0 2024-11-20T19:25:35,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741938_1114 (size=7415) 2024-11-20T19:25:35,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T19:25:35,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T19:25:35,501 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=481 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/5c4b9c2798454cb7927cca64e5f8d296 2024-11-20T19:25:35,516 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/080e4e58797e4624b7b48e24b5a3b51c is 50, key is test_row_1/B:col10/1732130734853/Put/seqid=0 2024-11-20T19:25:35,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741939_1115 (size=7415) 2024-11-20T19:25:35,887 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=RegionServerStatusService, sasl=false 2024-11-20T19:25:35,889 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40870, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T19:25:35,921 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=481 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/080e4e58797e4624b7b48e24b5a3b51c 2024-11-20T19:25:35,934 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/5a5211c5b6be448d87ba4c0830ac86a9 is 50, key is test_row_1/C:col10/1732130734853/Put/seqid=0 2024-11-20T19:25:35,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741940_1116 (size=7415) 2024-11-20T19:25:35,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T19:25:36,342 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=481 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/5a5211c5b6be448d87ba4c0830ac86a9 2024-11-20T19:25:36,353 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/A/5c4b9c2798454cb7927cca64e5f8d296 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/5c4b9c2798454cb7927cca64e5f8d296 2024-11-20T19:25:36,360 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/5c4b9c2798454cb7927cca64e5f8d296, entries=50, sequenceid=481, filesize=7.2 K 2024-11-20T19:25:36,361 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/B/080e4e58797e4624b7b48e24b5a3b51c as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/080e4e58797e4624b7b48e24b5a3b51c 2024-11-20T19:25:36,366 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/080e4e58797e4624b7b48e24b5a3b51c, entries=50, sequenceid=481, filesize=7.2 K 2024-11-20T19:25:36,367 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/.tmp/C/5a5211c5b6be448d87ba4c0830ac86a9 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/5a5211c5b6be448d87ba4c0830ac86a9 2024-11-20T19:25:36,371 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/5a5211c5b6be448d87ba4c0830ac86a9, entries=50, sequenceid=481, filesize=7.2 K 2024-11-20T19:25:36,372 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 6fb4967d0e6203ca72c498496394ce45 in 1284ms, sequenceid=481, compaction requested=true 2024-11-20T19:25:36,372 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/0d816f012458428889902f64c2d797f5, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/4b2bb5d3999b49f8989f9be04f92cfb1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/1b5e0a8dd28244959843e0c12bf928de, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/7beb14e34462404a871e3436c9133f11, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/c28a6f8404f54976abfff8e804970842, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/088d47e8de1c49e185e4562227adcf75, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/a354941f689f47858645ec80cd42014c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/5ee6f65607574231bebafbe6ce1910ef, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/357302b6d3ae457fb28d7f66ce904d2e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/636d985cbb8146d79c6626aee8dd6836, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/cbdf3c2ff1294c36917b88c53c8d621f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/140683aba39643b0b83bee1845e3d708, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/83c4b08275c34cbabf20c4a0619da142, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/fac3d9ae98b447f8a7141693a23d2801, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/ab422284e09349eb99e09ae8b54738c0, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/bef4bc06bfb84b6784dac5a582ff67b9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/72b9af5d7a9847c091d658824b71aec9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/fb2eae7f5d2b4f55a3d3b76977e0f91d, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/8c207fb65f7849e09a18bb51cb81a803, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/2c749935526846dcb7b4a212910da6ab, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/0828581442354dafb84885dff8ad251b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/023c9d0dfb4f4a6a8ed3dca392ae5784, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/84121d90906740cb840bf27caaa2459e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/da805af037684089ae43ac3b8e69b7e9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/84844fd05cba42b3bcd621943f94f9dd, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/98ba338397814a65bb503252614fc0ab, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/4d900035cc2c42c69a4291ce00d858c5, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/eddc074391f444fd872166707a47960d, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/e83771e664d741ec89395031ca92126d, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/c91433b27c9e444f9a1a5a5f81f278ca] to archive 2024-11-20T19:25:36,375 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T19:25:36,380 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/0d816f012458428889902f64c2d797f5 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/0d816f012458428889902f64c2d797f5 2024-11-20T19:25:36,382 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/4b2bb5d3999b49f8989f9be04f92cfb1 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/4b2bb5d3999b49f8989f9be04f92cfb1 2024-11-20T19:25:36,383 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/1b5e0a8dd28244959843e0c12bf928de to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/1b5e0a8dd28244959843e0c12bf928de 2024-11-20T19:25:36,385 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/7beb14e34462404a871e3436c9133f11 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/7beb14e34462404a871e3436c9133f11 2024-11-20T19:25:36,386 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/c28a6f8404f54976abfff8e804970842 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/c28a6f8404f54976abfff8e804970842 2024-11-20T19:25:36,387 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/088d47e8de1c49e185e4562227adcf75 to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/088d47e8de1c49e185e4562227adcf75 2024-11-20T19:25:36,389 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/a354941f689f47858645ec80cd42014c to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/a354941f689f47858645ec80cd42014c 2024-11-20T19:25:36,390 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/5ee6f65607574231bebafbe6ce1910ef to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/5ee6f65607574231bebafbe6ce1910ef 2024-11-20T19:25:36,391 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/357302b6d3ae457fb28d7f66ce904d2e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/357302b6d3ae457fb28d7f66ce904d2e 2024-11-20T19:25:36,392 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/636d985cbb8146d79c6626aee8dd6836 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/636d985cbb8146d79c6626aee8dd6836 2024-11-20T19:25:36,394 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/cbdf3c2ff1294c36917b88c53c8d621f to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/cbdf3c2ff1294c36917b88c53c8d621f 2024-11-20T19:25:36,395 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/140683aba39643b0b83bee1845e3d708 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/140683aba39643b0b83bee1845e3d708 2024-11-20T19:25:36,396 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/83c4b08275c34cbabf20c4a0619da142 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/83c4b08275c34cbabf20c4a0619da142 2024-11-20T19:25:36,397 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/fac3d9ae98b447f8a7141693a23d2801 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/fac3d9ae98b447f8a7141693a23d2801 2024-11-20T19:25:36,398 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/ab422284e09349eb99e09ae8b54738c0 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/ab422284e09349eb99e09ae8b54738c0 2024-11-20T19:25:36,399 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/bef4bc06bfb84b6784dac5a582ff67b9 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/bef4bc06bfb84b6784dac5a582ff67b9 2024-11-20T19:25:36,400 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/72b9af5d7a9847c091d658824b71aec9 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/72b9af5d7a9847c091d658824b71aec9 2024-11-20T19:25:36,401 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/fb2eae7f5d2b4f55a3d3b76977e0f91d to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/fb2eae7f5d2b4f55a3d3b76977e0f91d 2024-11-20T19:25:36,402 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/8c207fb65f7849e09a18bb51cb81a803 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/8c207fb65f7849e09a18bb51cb81a803 2024-11-20T19:25:36,403 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/2c749935526846dcb7b4a212910da6ab to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/2c749935526846dcb7b4a212910da6ab 2024-11-20T19:25:36,404 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/0828581442354dafb84885dff8ad251b to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/0828581442354dafb84885dff8ad251b 2024-11-20T19:25:36,405 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/023c9d0dfb4f4a6a8ed3dca392ae5784 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/023c9d0dfb4f4a6a8ed3dca392ae5784 2024-11-20T19:25:36,407 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/84121d90906740cb840bf27caaa2459e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/84121d90906740cb840bf27caaa2459e 2024-11-20T19:25:36,409 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/da805af037684089ae43ac3b8e69b7e9 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/da805af037684089ae43ac3b8e69b7e9 2024-11-20T19:25:36,411 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/84844fd05cba42b3bcd621943f94f9dd to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/84844fd05cba42b3bcd621943f94f9dd 2024-11-20T19:25:36,413 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/98ba338397814a65bb503252614fc0ab to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/98ba338397814a65bb503252614fc0ab 2024-11-20T19:25:36,415 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/4d900035cc2c42c69a4291ce00d858c5 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/4d900035cc2c42c69a4291ce00d858c5 2024-11-20T19:25:36,416 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/eddc074391f444fd872166707a47960d to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/eddc074391f444fd872166707a47960d 2024-11-20T19:25:36,420 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/e83771e664d741ec89395031ca92126d to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/e83771e664d741ec89395031ca92126d 2024-11-20T19:25:36,422 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/c91433b27c9e444f9a1a5a5f81f278ca to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/c91433b27c9e444f9a1a5a5f81f278ca 2024-11-20T19:25:36,443 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/a0deb564ac854e7a838d058b63c00d9b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/99649a47c7e7422b97db68b0c9ae66ec, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/f1df051742d44e8db8ad75ddb12f151e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/4bfb23c97eea404ca818d658227a7da9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/fc0e6e713249499283df9b901ee7c881, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/9c240517dac44740a074f90bb0d17708, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/c49a1e93ecea4fcbbad6cf23436a8350, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/57f25b51db134126b8cf2e447672dbad, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/76f2496e541f4ee6a441cabc1aefe265, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/50202581cc0841a7a7fd1c0711265a2b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/5287e990a5434af58ede6489e9ec6408, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/30834964aaa348debd4b6edd268e83ee, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/d5f72ab2071342bdba0979d31ac5be58, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/8b20dd05ad9840acb881eba5a78adebc, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/2da7de442fbe4ba1a1ff1c847da4a815, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/52901557d2ba47c4874bb5585f0c12a2, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/1dd55620b24d424392fbe38e6362360c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/de6a7f1ec41f4575a9a584d3d4407c9a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/9ec2488527924a40abdfb8adcff8b529, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/e05a5766ae56431bae3e02b698befadd, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/5a759e67462148469c5b82e2c830ab28, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/05cc33bf50464240a3683946b1531808, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/d44c75dd835247c987a012fa1bc7cd14, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/9a4d0cc80078457d8e1e9ca6b5ba6adc, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/fa3da9cca51d4ccf97b2c32e9fcbec85, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/c683c394252d438ca276ba3dd3482baf, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/e548380cc5d14494a392d42412b67f6f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/1e7c1affb01d45cfa65b903ea1bcf704, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/69c3f41331104ed28eba4b8537c8f059, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/a8a19fa5e9d94a339d8c90c225b0385a] to archive 2024-11-20T19:25:36,445 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T19:25:36,447 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/a0deb564ac854e7a838d058b63c00d9b to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/a0deb564ac854e7a838d058b63c00d9b 2024-11-20T19:25:36,450 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/99649a47c7e7422b97db68b0c9ae66ec to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/99649a47c7e7422b97db68b0c9ae66ec 2024-11-20T19:25:36,451 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/f1df051742d44e8db8ad75ddb12f151e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/f1df051742d44e8db8ad75ddb12f151e 2024-11-20T19:25:36,453 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/4bfb23c97eea404ca818d658227a7da9 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/4bfb23c97eea404ca818d658227a7da9 2024-11-20T19:25:36,454 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/fc0e6e713249499283df9b901ee7c881 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/fc0e6e713249499283df9b901ee7c881 2024-11-20T19:25:36,455 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/9c240517dac44740a074f90bb0d17708 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/9c240517dac44740a074f90bb0d17708 2024-11-20T19:25:36,456 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/c49a1e93ecea4fcbbad6cf23436a8350 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/c49a1e93ecea4fcbbad6cf23436a8350 2024-11-20T19:25:36,458 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/57f25b51db134126b8cf2e447672dbad to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/57f25b51db134126b8cf2e447672dbad 2024-11-20T19:25:36,459 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/76f2496e541f4ee6a441cabc1aefe265 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/76f2496e541f4ee6a441cabc1aefe265 2024-11-20T19:25:36,460 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/50202581cc0841a7a7fd1c0711265a2b to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/50202581cc0841a7a7fd1c0711265a2b 2024-11-20T19:25:36,461 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/5287e990a5434af58ede6489e9ec6408 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/5287e990a5434af58ede6489e9ec6408 2024-11-20T19:25:36,462 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/30834964aaa348debd4b6edd268e83ee to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/30834964aaa348debd4b6edd268e83ee 2024-11-20T19:25:36,463 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/d5f72ab2071342bdba0979d31ac5be58 to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/d5f72ab2071342bdba0979d31ac5be58 2024-11-20T19:25:36,464 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/8b20dd05ad9840acb881eba5a78adebc to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/8b20dd05ad9840acb881eba5a78adebc 2024-11-20T19:25:36,465 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/2da7de442fbe4ba1a1ff1c847da4a815 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/2da7de442fbe4ba1a1ff1c847da4a815 2024-11-20T19:25:36,467 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/52901557d2ba47c4874bb5585f0c12a2 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/52901557d2ba47c4874bb5585f0c12a2 2024-11-20T19:25:36,468 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/1dd55620b24d424392fbe38e6362360c to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/1dd55620b24d424392fbe38e6362360c 2024-11-20T19:25:36,470 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/de6a7f1ec41f4575a9a584d3d4407c9a to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/de6a7f1ec41f4575a9a584d3d4407c9a 2024-11-20T19:25:36,471 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/9ec2488527924a40abdfb8adcff8b529 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/9ec2488527924a40abdfb8adcff8b529 2024-11-20T19:25:36,472 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/e05a5766ae56431bae3e02b698befadd to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/e05a5766ae56431bae3e02b698befadd 2024-11-20T19:25:36,474 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/5a759e67462148469c5b82e2c830ab28 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/5a759e67462148469c5b82e2c830ab28 2024-11-20T19:25:36,475 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/05cc33bf50464240a3683946b1531808 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/05cc33bf50464240a3683946b1531808 2024-11-20T19:25:36,477 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/d44c75dd835247c987a012fa1bc7cd14 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/d44c75dd835247c987a012fa1bc7cd14 2024-11-20T19:25:36,478 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/9a4d0cc80078457d8e1e9ca6b5ba6adc to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/9a4d0cc80078457d8e1e9ca6b5ba6adc 2024-11-20T19:25:36,480 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/fa3da9cca51d4ccf97b2c32e9fcbec85 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/fa3da9cca51d4ccf97b2c32e9fcbec85 2024-11-20T19:25:36,482 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/c683c394252d438ca276ba3dd3482baf to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/c683c394252d438ca276ba3dd3482baf 2024-11-20T19:25:36,484 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/e548380cc5d14494a392d42412b67f6f to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/e548380cc5d14494a392d42412b67f6f 2024-11-20T19:25:36,486 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/1e7c1affb01d45cfa65b903ea1bcf704 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/1e7c1affb01d45cfa65b903ea1bcf704 2024-11-20T19:25:36,487 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/69c3f41331104ed28eba4b8537c8f059 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/69c3f41331104ed28eba4b8537c8f059 2024-11-20T19:25:36,489 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/a8a19fa5e9d94a339d8c90c225b0385a to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/a8a19fa5e9d94a339d8c90c225b0385a 2024-11-20T19:25:36,491 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/22df1258abbf4d8f817635f99e10d4ab, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/fedfcd10b5c0477383ba68355a96b65b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/a116e1c98d36455ea2a8c3d1313c5c5d, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/3b8457b0bdc249adbbb0ef7fc36c376b, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/eba9057af49445bca438a75750ef786a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/8314e20ce6dc43faac73fd822d7196d9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/7e9cb734ebbe4f79bd8659bd686569de, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/c1886967b8f04b64821f3c0edd8cc2e0, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/bf26af61a4ee4fb4809938fc033e44fd, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/3e279a179bd64d719912d3533776464f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/2c7f5bdc0c3a4659afa3a79386a172db, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/480efe4866f54b3ba0c4be8409e6c5a1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/adbd2ca3af0d4a77b4e025af6456a7be, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/8f680650f4aa438b830ea3bec293d2be, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/9545309004e04918aa5c5d7be9fd4d47, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/f4010a615d034a55b29bbbb6b981765b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/1760466cd6e54817a579ff78ed5502ae, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/afa6c98285e445ce9a3351d1a898661f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/1e2c17c45b7647bebb23891d7d8b88bd, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/881db46712a54edca531afd673ae3af8, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/bdf70134cc9f4a40868112347e2e6ed7, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/8438f8b64a0540239315166e35752c3e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/d2ef9ecddf1d48e697384ad884cbdfcd, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/5fed7ee08a1a42e581ad41b37789a7c2, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/5e868d198ae247b0aef3bcd6b064a58b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/38dbd65bd82d498c985c9bb4675c26f6, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/156c8568892b4c0ba685d27aef190839, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/58dfde063b1d43528788e732ac5c7f00, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/c82e2953125c4333b21270c7cb8422e2, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/38b5804e9ed8482aaa61f142aff7579d] to archive 2024-11-20T19:25:36,492 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T19:25:36,494 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/22df1258abbf4d8f817635f99e10d4ab to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/22df1258abbf4d8f817635f99e10d4ab 2024-11-20T19:25:36,496 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/fedfcd10b5c0477383ba68355a96b65b to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/fedfcd10b5c0477383ba68355a96b65b 2024-11-20T19:25:36,497 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/a116e1c98d36455ea2a8c3d1313c5c5d to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/a116e1c98d36455ea2a8c3d1313c5c5d 2024-11-20T19:25:36,499 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/3b8457b0bdc249adbbb0ef7fc36c376b to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/3b8457b0bdc249adbbb0ef7fc36c376b 2024-11-20T19:25:36,500 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/eba9057af49445bca438a75750ef786a to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/eba9057af49445bca438a75750ef786a 2024-11-20T19:25:36,502 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/8314e20ce6dc43faac73fd822d7196d9 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/8314e20ce6dc43faac73fd822d7196d9 2024-11-20T19:25:36,503 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/7e9cb734ebbe4f79bd8659bd686569de to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/7e9cb734ebbe4f79bd8659bd686569de 2024-11-20T19:25:36,504 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/c1886967b8f04b64821f3c0edd8cc2e0 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/c1886967b8f04b64821f3c0edd8cc2e0 2024-11-20T19:25:36,505 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/bf26af61a4ee4fb4809938fc033e44fd to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/bf26af61a4ee4fb4809938fc033e44fd 2024-11-20T19:25:36,507 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/3e279a179bd64d719912d3533776464f to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/3e279a179bd64d719912d3533776464f 2024-11-20T19:25:36,508 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/2c7f5bdc0c3a4659afa3a79386a172db to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/2c7f5bdc0c3a4659afa3a79386a172db 2024-11-20T19:25:36,509 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/480efe4866f54b3ba0c4be8409e6c5a1 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/480efe4866f54b3ba0c4be8409e6c5a1 2024-11-20T19:25:36,510 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/adbd2ca3af0d4a77b4e025af6456a7be to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/adbd2ca3af0d4a77b4e025af6456a7be 2024-11-20T19:25:36,511 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/8f680650f4aa438b830ea3bec293d2be to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/8f680650f4aa438b830ea3bec293d2be 2024-11-20T19:25:36,512 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/9545309004e04918aa5c5d7be9fd4d47 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/9545309004e04918aa5c5d7be9fd4d47 2024-11-20T19:25:36,513 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/f4010a615d034a55b29bbbb6b981765b to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/f4010a615d034a55b29bbbb6b981765b 2024-11-20T19:25:36,514 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/1760466cd6e54817a579ff78ed5502ae to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/1760466cd6e54817a579ff78ed5502ae 2024-11-20T19:25:36,515 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/afa6c98285e445ce9a3351d1a898661f to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/afa6c98285e445ce9a3351d1a898661f 2024-11-20T19:25:36,516 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/1e2c17c45b7647bebb23891d7d8b88bd to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/1e2c17c45b7647bebb23891d7d8b88bd 2024-11-20T19:25:36,517 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/881db46712a54edca531afd673ae3af8 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/881db46712a54edca531afd673ae3af8 2024-11-20T19:25:36,518 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/bdf70134cc9f4a40868112347e2e6ed7 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/bdf70134cc9f4a40868112347e2e6ed7 2024-11-20T19:25:36,519 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/8438f8b64a0540239315166e35752c3e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/8438f8b64a0540239315166e35752c3e 2024-11-20T19:25:36,520 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/d2ef9ecddf1d48e697384ad884cbdfcd to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/d2ef9ecddf1d48e697384ad884cbdfcd 2024-11-20T19:25:36,521 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/5fed7ee08a1a42e581ad41b37789a7c2 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/5fed7ee08a1a42e581ad41b37789a7c2 2024-11-20T19:25:36,522 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/5e868d198ae247b0aef3bcd6b064a58b to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/5e868d198ae247b0aef3bcd6b064a58b 2024-11-20T19:25:36,524 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/38dbd65bd82d498c985c9bb4675c26f6 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/38dbd65bd82d498c985c9bb4675c26f6 2024-11-20T19:25:36,525 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/156c8568892b4c0ba685d27aef190839 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/156c8568892b4c0ba685d27aef190839 2024-11-20T19:25:36,527 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/58dfde063b1d43528788e732ac5c7f00 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/58dfde063b1d43528788e732ac5c7f00 2024-11-20T19:25:36,528 DEBUG [StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/c82e2953125c4333b21270c7cb8422e2 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/c82e2953125c4333b21270c7cb8422e2 2024-11-20T19:25:36,530 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/38b5804e9ed8482aaa61f142aff7579d to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/38b5804e9ed8482aaa61f142aff7579d 2024-11-20T19:25:36,535 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/recovered.edits/484.seqid, newMaxSeqId=484, maxSeqId=1 2024-11-20T19:25:36,538 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45. 2024-11-20T19:25:36,538 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1635): Region close journal for 6fb4967d0e6203ca72c498496394ce45: 2024-11-20T19:25:36,540 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(170): Closed 6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:36,540 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=6fb4967d0e6203ca72c498496394ce45, regionState=CLOSED 2024-11-20T19:25:36,543 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-11-20T19:25:36,544 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; CloseRegionProcedure 6fb4967d0e6203ca72c498496394ce45, server=db9c3a6c6492,41229,1732130701496 in 1.6170 sec 2024-11-20T19:25:36,544 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=34, resume processing ppid=33 2024-11-20T19:25:36,544 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, ppid=33, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6fb4967d0e6203ca72c498496394ce45, UNASSIGN in 1.6230 sec 2024-11-20T19:25:36,547 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-11-20T19:25:36,547 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.6300 sec 2024-11-20T19:25:36,548 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130736548"}]},"ts":"1732130736548"} 2024-11-20T19:25:36,549 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T19:25:36,588 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T19:25:36,590 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.7250 sec 2024-11-20T19:25:36,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=32 2024-11-20T19:25:36,975 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-11-20T19:25:36,978 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T19:25:36,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:25:36,984 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:25:36,985 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=36, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:25:36,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T19:25:37,003 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:37,007 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A, FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B, FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C, FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/recovered.edits] 2024-11-20T19:25:37,010 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/5c4b9c2798454cb7927cca64e5f8d296 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/5c4b9c2798454cb7927cca64e5f8d296 2024-11-20T19:25:37,012 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/95e72c140a5c4b4ca342ffff876b5ebb to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/95e72c140a5c4b4ca342ffff876b5ebb 2024-11-20T19:25:37,014 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/ab8ea8b1dd7743ca8c7b58870cd9efaf to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/ab8ea8b1dd7743ca8c7b58870cd9efaf 2024-11-20T19:25:37,016 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/bccc0791273e4535bf91f66bd65b01ef to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/A/bccc0791273e4535bf91f66bd65b01ef 2024-11-20T19:25:37,020 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/080e4e58797e4624b7b48e24b5a3b51c to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/080e4e58797e4624b7b48e24b5a3b51c 2024-11-20T19:25:37,023 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/c52ce36c6f22414c9e191378db56859a to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/c52ce36c6f22414c9e191378db56859a 2024-11-20T19:25:37,028 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/d2cd77b50a614795ab27459764c19911 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/d2cd77b50a614795ab27459764c19911 2024-11-20T19:25:37,030 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/fd45133542f848ae8bf77f1fcb24da7e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/B/fd45133542f848ae8bf77f1fcb24da7e 2024-11-20T19:25:37,033 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/5a5211c5b6be448d87ba4c0830ac86a9 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/5a5211c5b6be448d87ba4c0830ac86a9 2024-11-20T19:25:37,034 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/787dec72e95d4cb7bf2ca25c7cfe3d81 to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/787dec72e95d4cb7bf2ca25c7cfe3d81 2024-11-20T19:25:37,035 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/92b2e09a373c47559cfd3512b7ce2a74 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/92b2e09a373c47559cfd3512b7ce2a74 2024-11-20T19:25:37,036 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/a35868935bcd4e76837f31c42ed8f687 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/C/a35868935bcd4e76837f31c42ed8f687 2024-11-20T19:25:37,040 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/recovered.edits/484.seqid to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45/recovered.edits/484.seqid 2024-11-20T19:25:37,040 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6fb4967d0e6203ca72c498496394ce45 2024-11-20T19:25:37,041 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T19:25:37,046 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=36, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:25:37,050 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-20T19:25:37,053 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T19:25:37,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T19:25:37,092 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T19:25:37,093 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=36, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:25:37,093 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
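Editor's note: the DISABLE (procId 32) and DELETE (procId 36) operations recorded in this stretch of the log are issued from the test client. A minimal client-side equivalent, assuming a standard HBase Connection built from default configuration (connection settings are not shown in this log) and using only the public Admin API, would look like:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch of the client calls behind the "Operation: DISABLE" / "Operation: DELETE"
// entries above; configuration and cluster addresses are assumptions, not taken from this log.
public final class DropTestTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            if (admin.isTableEnabled(table)) {
                admin.disableTable(table);   // master runs DisableTableProcedure (pid=32 above)
            }
            admin.deleteTable(table);        // master runs DeleteTableProcedure (pid=36 above)
        }
    }
}
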
2024-11-20T19:25:37,094 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732130737093"}]},"ts":"9223372036854775807"} 2024-11-20T19:25:37,097 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T19:25:37,097 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 6fb4967d0e6203ca72c498496394ce45, NAME => 'TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T19:25:37,097 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T19:25:37,098 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732130737097"}]},"ts":"9223372036854775807"} 2024-11-20T19:25:37,101 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T19:25:37,106 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=36, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:25:37,107 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 127 msec 2024-11-20T19:25:37,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T19:25:37,289 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 36 completed 2024-11-20T19:25:37,301 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=238 (was 219) Potentially hanging thread: RS:0;db9c3a6c6492:41229-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-531737850_22 at /127.0.0.1:40760 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x429eb088-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: hconnection-0x429eb088-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x429eb088-shared-pool-6 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x429eb088-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=543 (was 196) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3722 (was 4774) 2024-11-20T19:25:37,310 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=238, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=543, ProcessCount=11, AvailableMemoryMB=3722 2024-11-20T19:25:37,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T19:25:37,313 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T19:25:37,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=37, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T19:25:37,315 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T19:25:37,315 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
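For orientation, the create request logged just above ('TestAcidGuarantees' with compacting memstore type BASIC and families A, B, C at one version each) corresponds roughly to the following HBase 2.x Admin-API sketch. This is an illustration only: `conn` is an assumed, already-open Connection, and the variable names are not taken from the test source.

    // Sketch only: approximates the table descriptor logged by HMaster above.
    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    class CreateTestAcidGuaranteesTable {
      static void create(Connection conn) throws IOException {
        TableDescriptorBuilder builder =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // TABLE_ATTRIBUTES metadata from the logged descriptor.
                .setValue("hbase.hregion.compacting.memstore.type", "BASIC")
                // The TableDescriptorChecker warning above reports a 131072-byte flush size.
                .setMemStoreFlushSize(131072L);
        for (String family : new String[] {"A", "B", "C"}) {
          builder.setColumnFamily(
              ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                  .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
                  .build());
        }
        try (Admin admin = conn.getAdmin()) {
          admin.createTable(builder.build());
        }
      }
    }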
2024-11-20T19:25:37,315 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 37 2024-11-20T19:25:37,316 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T19:25:37,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-20T19:25:37,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741941_1117 (size=960) 2024-11-20T19:25:37,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-20T19:25:37,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-20T19:25:37,727 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203 2024-11-20T19:25:37,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741942_1118 (size=53) 2024-11-20T19:25:37,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-20T19:25:38,138 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:25:38,138 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 550c6d15b8cc28d8b0f43501c9366c37, disabling compactions & flushes 2024-11-20T19:25:38,138 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] 
regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:38,138 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:38,138 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. after waiting 0 ms 2024-11-20T19:25:38,138 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:38,138 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:38,138 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:38,139 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T19:25:38,139 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732130738139"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732130738139"}]},"ts":"1732130738139"} 2024-11-20T19:25:38,140 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-11-20T19:25:38,141 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T19:25:38,141 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130738141"}]},"ts":"1732130738141"} 2024-11-20T19:25:38,142 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T19:25:38,163 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=550c6d15b8cc28d8b0f43501c9366c37, ASSIGN}] 2024-11-20T19:25:38,164 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=550c6d15b8cc28d8b0f43501c9366c37, ASSIGN 2024-11-20T19:25:38,165 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=550c6d15b8cc28d8b0f43501c9366c37, ASSIGN; state=OFFLINE, location=db9c3a6c6492,41229,1732130701496; forceNewPlan=false, retain=false 2024-11-20T19:25:38,316 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=550c6d15b8cc28d8b0f43501c9366c37, regionState=OPENING, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:38,317 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; OpenRegionProcedure 550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496}] 2024-11-20T19:25:38,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-20T19:25:38,470 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:38,475 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
2024-11-20T19:25:38,475 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7285): Opening region: {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} 2024-11-20T19:25:38,476 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:38,476 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:25:38,476 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7327): checking encryption for 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:38,476 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7330): checking classloading for 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:38,479 INFO [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:38,481 INFO [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:25:38,482 INFO [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 550c6d15b8cc28d8b0f43501c9366c37 columnFamilyName A 2024-11-20T19:25:38,482 DEBUG [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:38,483 INFO [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] regionserver.HStore(327): Store=550c6d15b8cc28d8b0f43501c9366c37/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:25:38,483 INFO [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:38,485 INFO [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:25:38,486 INFO [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 550c6d15b8cc28d8b0f43501c9366c37 columnFamilyName B 2024-11-20T19:25:38,487 DEBUG [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:38,488 INFO [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] regionserver.HStore(327): Store=550c6d15b8cc28d8b0f43501c9366c37/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:25:38,488 INFO [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:38,490 INFO [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:25:38,491 INFO [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 550c6d15b8cc28d8b0f43501c9366c37 columnFamilyName C 2024-11-20T19:25:38,491 DEBUG [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:38,492 INFO [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] regionserver.HStore(327): Store=550c6d15b8cc28d8b0f43501c9366c37/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:25:38,492 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:38,493 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:38,493 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:38,495 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T19:25:38,496 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1085): writing seq id for 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:38,498 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T19:25:38,499 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1102): Opened 550c6d15b8cc28d8b0f43501c9366c37; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60049144, jitterRate=-0.10519802570343018}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T19:25:38,499 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1001): Region open journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:38,500 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., pid=39, masterSystemTime=1732130738469 2024-11-20T19:25:38,501 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:38,501 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
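The test named in the ResourceChecker line above, testMobMixedAtomicity, exercises row-level atomicity: a single Put spans families A, B and C, and readers must never observe values from different writes within one row. A minimal sketch of that access pattern follows; `conn`, the qualifier name and the value layout are illustrative assumptions, not the test's actual code.

    // Minimal sketch of the row-atomicity pattern: one Put covers all three
    // families, so a Get should always see values from the same write.
    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    class AtomicRowSketch {
      static final byte[][] FAMILIES =
          {Bytes.toBytes("A"), Bytes.toBytes("B"), Bytes.toBytes("C")};
      static final byte[] QUAL = Bytes.toBytes("q");

      static void writeRow(Connection conn, byte[] row, long stamp) throws IOException {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(row);
          for (byte[] family : FAMILIES) {
            // Same value written to every family in a single atomic mutation.
            put.addColumn(family, QUAL, Bytes.toBytes(stamp));
          }
          table.put(put);
        }
      }

      static void checkRow(Connection conn, byte[] row) throws IOException {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Result result = table.get(new Get(row));
          byte[] expected = result.getValue(FAMILIES[0], QUAL);
          for (byte[] family : FAMILIES) {
            if (!Bytes.equals(expected, result.getValue(family, QUAL))) {
              throw new IllegalStateException("Row not atomic across families");
            }
          }
        }
      }
    }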
2024-11-20T19:25:38,502 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=550c6d15b8cc28d8b0f43501c9366c37, regionState=OPEN, openSeqNum=2, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:38,504 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-11-20T19:25:38,504 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; OpenRegionProcedure 550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 in 186 msec 2024-11-20T19:25:38,506 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=38, resume processing ppid=37 2024-11-20T19:25:38,506 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, ppid=37, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=550c6d15b8cc28d8b0f43501c9366c37, ASSIGN in 341 msec 2024-11-20T19:25:38,506 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T19:25:38,506 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130738506"}]},"ts":"1732130738506"} 2024-11-20T19:25:38,507 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T19:25:38,548 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T19:25:38,550 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2350 sec 2024-11-20T19:25:39,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-20T19:25:39,425 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 37 completed 2024-11-20T19:25:39,430 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4e560c7b to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4ddf4c3 2024-11-20T19:25:39,489 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ff872d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:39,491 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:39,494 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37530, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:39,502 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T19:25:39,505 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44160, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T19:25:39,511 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T19:25:39,511 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T19:25:39,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=40, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-20T19:25:39,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741943_1119 (size=996) 2024-11-20T19:25:39,931 DEBUG [PEWorker-5 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-20T19:25:39,931 INFO [PEWorker-5 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-20T19:25:39,936 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T19:25:39,946 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=550c6d15b8cc28d8b0f43501c9366c37, REOPEN/MOVE}] 2024-11-20T19:25:39,947 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=550c6d15b8cc28d8b0f43501c9366c37, REOPEN/MOVE 2024-11-20T19:25:39,948 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=550c6d15b8cc28d8b0f43501c9366c37, regionState=CLOSING, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:39,949 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T19:25:39,949 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=43, ppid=42, state=RUNNABLE; CloseRegionProcedure 550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496}] 2024-11-20T19:25:40,101 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:40,101 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(124): Close 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:40,102 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T19:25:40,102 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1681): Closing 550c6d15b8cc28d8b0f43501c9366c37, disabling compactions & flushes 2024-11-20T19:25:40,102 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:40,102 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:40,102 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. after waiting 0 ms 2024-11-20T19:25:40,102 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
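The ModifyTableProcedure stored above (pid=40) switches family A to a MOB family, IS_MOB => 'true' with MOB_THRESHOLD => '4', which is what forces the region close and reopen that follow. A rough Admin-API equivalent is sketched below; `conn` is an assumed Connection and the code is illustrative rather than the test's own modification path.

    // Sketch of the logged table modification: enable MOB on family A with a
    // 4-byte threshold, leaving families B and C unchanged.
    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    class EnableMobOnFamilyA {
      static void apply(Connection conn) throws IOException {
        TableName name = TableName.valueOf("TestAcidGuarantees");
        try (Admin admin = conn.getAdmin()) {
          TableDescriptor current = admin.getDescriptor(name);
          ColumnFamilyDescriptor a = current.getColumnFamily(Bytes.toBytes("A"));
          TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
              .modifyColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(a)
                  .setMobEnabled(true)   // IS_MOB => 'true'
                  .setMobThreshold(4L)   // MOB_THRESHOLD => '4'
                  .build())
              .build();
          // Triggers ModifyTableProcedure and the REOPEN/MOVE seen in the log.
          admin.modifyTable(updated);
        }
      }
    }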
2024-11-20T19:25:40,108 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-20T19:25:40,109 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:40,110 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1635): Region close journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:40,110 WARN [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegionServer(3786): Not adding moved region record: 550c6d15b8cc28d8b0f43501c9366c37 to self. 2024-11-20T19:25:40,112 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(170): Closed 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:40,113 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=550c6d15b8cc28d8b0f43501c9366c37, regionState=CLOSED 2024-11-20T19:25:40,116 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-11-20T19:25:40,117 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; CloseRegionProcedure 550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 in 165 msec 2024-11-20T19:25:40,117 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=550c6d15b8cc28d8b0f43501c9366c37, REOPEN/MOVE; state=CLOSED, location=db9c3a6c6492,41229,1732130701496; forceNewPlan=false, retain=true 2024-11-20T19:25:40,268 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=550c6d15b8cc28d8b0f43501c9366c37, regionState=OPENING, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:40,269 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=42, state=RUNNABLE; OpenRegionProcedure 550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496}] 2024-11-20T19:25:40,422 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:40,428 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
2024-11-20T19:25:40,429 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7285): Opening region: {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} 2024-11-20T19:25:40,430 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:40,430 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:25:40,430 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7327): checking encryption for 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:40,430 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7330): checking classloading for 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:40,433 INFO [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:40,434 INFO [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:25:40,440 INFO [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 550c6d15b8cc28d8b0f43501c9366c37 columnFamilyName A 2024-11-20T19:25:40,443 DEBUG [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:40,443 INFO [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] regionserver.HStore(327): Store=550c6d15b8cc28d8b0f43501c9366c37/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:25:40,444 INFO [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:40,445 INFO [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:25:40,445 INFO [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 550c6d15b8cc28d8b0f43501c9366c37 columnFamilyName B 2024-11-20T19:25:40,445 DEBUG [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:40,446 INFO [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] regionserver.HStore(327): Store=550c6d15b8cc28d8b0f43501c9366c37/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:25:40,446 INFO [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:40,446 INFO [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:25:40,447 INFO [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 550c6d15b8cc28d8b0f43501c9366c37 columnFamilyName C 2024-11-20T19:25:40,447 DEBUG [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:40,447 INFO [StoreOpener-550c6d15b8cc28d8b0f43501c9366c37-1 {}] regionserver.HStore(327): Store=550c6d15b8cc28d8b0f43501c9366c37/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:25:40,447 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:40,448 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:40,449 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:40,451 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T19:25:40,453 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1085): writing seq id for 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:40,454 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1102): Opened 550c6d15b8cc28d8b0f43501c9366c37; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60583797, jitterRate=-0.09723107516765594}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T19:25:40,456 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1001): Region open journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:40,457 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., pid=44, masterSystemTime=1732130740421 2024-11-20T19:25:40,459 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=550c6d15b8cc28d8b0f43501c9366c37, regionState=OPEN, openSeqNum=5, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:40,460 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
2024-11-20T19:25:40,463 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=42 2024-11-20T19:25:40,463 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=42, state=SUCCESS; OpenRegionProcedure 550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 in 192 msec 2024-11-20T19:25:40,465 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-11-20T19:25:40,465 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=550c6d15b8cc28d8b0f43501c9366c37, REOPEN/MOVE in 517 msec 2024-11-20T19:25:40,466 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:40,468 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-11-20T19:25:40,468 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 531 msec 2024-11-20T19:25:40,471 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 957 msec 2024-11-20T19:25:40,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-11-20T19:25:40,479 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1c826820 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@29458edd 2024-11-20T19:25:40,506 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cae6c5c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:40,507 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2931c73e to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4c7d6279 2024-11-20T19:25:40,521 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c820ef9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:40,523 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x176c5c1b to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@328f994d 2024-11-20T19:25:40,530 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e3a4420, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:40,532 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x190853fc to 
127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@a9306be 2024-11-20T19:25:40,539 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42e904d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:40,540 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x46114993 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@769942d9 2024-11-20T19:25:40,547 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c5c4716, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:40,549 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x367f47f7 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2885d2d9 2024-11-20T19:25:40,556 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cb464a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:40,557 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x247c0c93 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22e911df 2024-11-20T19:25:40,564 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78cafade, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:40,565 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x517ff977 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b727d6e 2024-11-20T19:25:40,572 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14c16cd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:40,574 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3448d233 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1c7940d9 2024-11-20T19:25:40,581 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@341384e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:40,584 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:40,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees 2024-11-20T19:25:40,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-20T19:25:40,585 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:40,586 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:40,586 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:40,590 DEBUG [hconnection-0x363e72bb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:40,590 DEBUG [hconnection-0x199d50a6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:40,592 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37542, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:40,595 DEBUG [hconnection-0x5dff5b82-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:40,595 DEBUG [hconnection-0x169e81a6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:40,595 DEBUG [hconnection-0x307c6de5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:40,595 DEBUG [hconnection-0x3c27b94a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:40,596 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37548, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:40,596 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37554, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:40,596 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37556, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:40,596 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37578, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:40,600 DEBUG [hconnection-0x7d4885c8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-11-20T19:25:40,601 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37590, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:40,601 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37602, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:40,607 DEBUG [hconnection-0x2a49bace-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:40,608 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37610, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:40,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:40,610 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 550c6d15b8cc28d8b0f43501c9366c37 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T19:25:40,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=A 2024-11-20T19:25:40,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:40,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=B 2024-11-20T19:25:40,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:40,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=C 2024-11-20T19:25:40,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:40,621 DEBUG [hconnection-0x1779166a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:40,622 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37618, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:40,653 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120fc5d5df803194d859e269484a33ae6a3_550c6d15b8cc28d8b0f43501c9366c37 is 50, key is test_row_0/A:col10/1732130740601/Put/seqid=0 2024-11-20T19:25:40,664 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:40,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130800658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:40,665 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:40,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130800660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:40,671 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:40,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130800664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:40,671 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:40,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130800664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:40,673 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:40,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130800668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:40,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-20T19:25:40,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741944_1120 (size=12154) 2024-11-20T19:25:40,709 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:40,717 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120fc5d5df803194d859e269484a33ae6a3_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fc5d5df803194d859e269484a33ae6a3_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:40,718 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/5d093c1eef174910b977a6c15abd3cd0, store: [table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:40,727 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/5d093c1eef174910b977a6c15abd3cd0 is 175, key is 
test_row_0/A:col10/1732130740601/Put/seqid=0 2024-11-20T19:25:40,737 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:40,738 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T19:25:40,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:40,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:40,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:40,739 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:40,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:40,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:40,774 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:40,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130800766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:40,775 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:40,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130800769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:40,777 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:40,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130800773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:40,777 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:40,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130800773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:40,777 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:40,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130800774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:40,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741945_1121 (size=30955) 2024-11-20T19:25:40,795 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/5d093c1eef174910b977a6c15abd3cd0 2024-11-20T19:25:40,825 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/61e5c3384e2c4311b10201aeb373fae1 is 50, key is test_row_0/B:col10/1732130740601/Put/seqid=0 2024-11-20T19:25:40,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741946_1122 (size=12001) 2024-11-20T19:25:40,841 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/61e5c3384e2c4311b10201aeb373fae1 2024-11-20T19:25:40,868 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/a394d14ae34548d69b1a7ff7ecee0a15 is 50, key is test_row_0/C:col10/1732130740601/Put/seqid=0 2024-11-20T19:25:40,887 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-20T19:25:40,891 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:40,892 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T19:25:40,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:40,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:40,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:40,893 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:40,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:40,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:40,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741947_1123 (size=12001) 2024-11-20T19:25:40,905 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/a394d14ae34548d69b1a7ff7ecee0a15 2024-11-20T19:25:40,914 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/5d093c1eef174910b977a6c15abd3cd0 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/5d093c1eef174910b977a6c15abd3cd0 2024-11-20T19:25:40,920 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/5d093c1eef174910b977a6c15abd3cd0, entries=150, sequenceid=17, filesize=30.2 K 2024-11-20T19:25:40,922 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/61e5c3384e2c4311b10201aeb373fae1 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/61e5c3384e2c4311b10201aeb373fae1 2024-11-20T19:25:40,939 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/61e5c3384e2c4311b10201aeb373fae1, entries=150, sequenceid=17, filesize=11.7 K 2024-11-20T19:25:40,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/a394d14ae34548d69b1a7ff7ecee0a15 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/a394d14ae34548d69b1a7ff7ecee0a15 2024-11-20T19:25:40,946 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/a394d14ae34548d69b1a7ff7ecee0a15, entries=150, sequenceid=17, filesize=11.7 K 2024-11-20T19:25:40,948 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for 550c6d15b8cc28d8b0f43501c9366c37 in 338ms, sequenceid=17, compaction requested=false 2024-11-20T19:25:40,948 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:40,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:40,987 
INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 550c6d15b8cc28d8b0f43501c9366c37 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T19:25:40,987 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=A 2024-11-20T19:25:40,987 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:40,987 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=B 2024-11-20T19:25:40,987 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:40,987 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=C 2024-11-20T19:25:40,987 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:41,009 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ed9433c0c7a94d719def3df44842a865_550c6d15b8cc28d8b0f43501c9366c37 is 50, key is test_row_0/A:col10/1732130740660/Put/seqid=0 2024-11-20T19:25:41,017 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130800997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:41,017 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130801001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:41,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130801010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:41,026 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130801014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:41,027 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130801017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:41,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741948_1124 (size=12154) 2024-11-20T19:25:41,029 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:41,034 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ed9433c0c7a94d719def3df44842a865_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ed9433c0c7a94d719def3df44842a865_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:41,037 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/49b85e0e5b0546ef92f23f93e4b08d91, store: [table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:41,038 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/49b85e0e5b0546ef92f23f93e4b08d91 is 175, key is test_row_0/A:col10/1732130740660/Put/seqid=0 2024-11-20T19:25:41,047 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:41,048 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T19:25:41,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 
{event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:41,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:41,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:41,048 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:41,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:41,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:41,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741949_1125 (size=30955) 2024-11-20T19:25:41,064 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=43, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/49b85e0e5b0546ef92f23f93e4b08d91 2024-11-20T19:25:41,100 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/846829718ab44cf7a862b39d8e338902 is 50, key is test_row_0/B:col10/1732130740660/Put/seqid=0 2024-11-20T19:25:41,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741950_1126 (size=12001) 2024-11-20T19:25:41,113 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/846829718ab44cf7a862b39d8e338902 2024-11-20T19:25:41,124 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130801119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:41,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130801120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:41,132 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130801129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:41,132 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130801131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:41,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130801129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:41,140 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/bd6d7d8da3294057831b11b202c522da is 50, key is test_row_0/C:col10/1732130740660/Put/seqid=0 2024-11-20T19:25:41,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741951_1127 (size=12001) 2024-11-20T19:25:41,149 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/bd6d7d8da3294057831b11b202c522da 2024-11-20T19:25:41,160 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/49b85e0e5b0546ef92f23f93e4b08d91 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/49b85e0e5b0546ef92f23f93e4b08d91 2024-11-20T19:25:41,169 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/49b85e0e5b0546ef92f23f93e4b08d91, entries=150, sequenceid=43, filesize=30.2 K 2024-11-20T19:25:41,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/846829718ab44cf7a862b39d8e338902 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/846829718ab44cf7a862b39d8e338902 2024-11-20T19:25:41,185 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/846829718ab44cf7a862b39d8e338902, entries=150, sequenceid=43, filesize=11.7 K 2024-11-20T19:25:41,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/bd6d7d8da3294057831b11b202c522da as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/bd6d7d8da3294057831b11b202c522da 2024-11-20T19:25:41,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-20T19:25:41,196 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/bd6d7d8da3294057831b11b202c522da, entries=150, sequenceid=43, filesize=11.7 K 2024-11-20T19:25:41,197 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 550c6d15b8cc28d8b0f43501c9366c37 in 211ms, sequenceid=43, compaction requested=false 2024-11-20T19:25:41,197 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:41,201 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:41,201 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T19:25:41,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
2024-11-20T19:25:41,202 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing 550c6d15b8cc28d8b0f43501c9366c37 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T19:25:41,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=A 2024-11-20T19:25:41,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:41,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=B 2024-11-20T19:25:41,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:41,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=C 2024-11-20T19:25:41,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:41,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120bc2fbb70fad744baa0c774fe529f82cf_550c6d15b8cc28d8b0f43501c9366c37 is 50, key is test_row_0/A:col10/1732130740990/Put/seqid=0 2024-11-20T19:25:41,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741952_1128 (size=12154) 2024-11-20T19:25:41,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:41,259 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120bc2fbb70fad744baa0c774fe529f82cf_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bc2fbb70fad744baa0c774fe529f82cf_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:41,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/01d98db437944a37b30ed0557f306a67, store: [table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:41,261 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/01d98db437944a37b30ed0557f306a67 is 175, key is test_row_0/A:col10/1732130740990/Put/seqid=0 2024-11-20T19:25:41,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741953_1129 (size=30955) 2024-11-20T19:25:41,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:41,334 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:41,439 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130801424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:41,440 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130801424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:41,440 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130801428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:41,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130801440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:41,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130801440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:41,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130801542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:41,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130801545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:41,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130801546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:41,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130801547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:41,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130801548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:41,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-20T19:25:41,699 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/01d98db437944a37b30ed0557f306a67 2024-11-20T19:25:41,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/5318d971096c4d5695b447802adeb277 is 50, key is test_row_0/B:col10/1732130740990/Put/seqid=0 2024-11-20T19:25:41,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741954_1130 (size=12001) 2024-11-20T19:25:41,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130801750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:41,758 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130801757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:41,761 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130801758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:41,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130801758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:41,763 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130801759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:42,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130802058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:42,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,065 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130802063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:42,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130802064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:42,079 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130802065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:42,079 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130802070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:42,088 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T19:25:42,132 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/5318d971096c4d5695b447802adeb277 2024-11-20T19:25:42,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/ebc251d35b714cc4b6949cf5d37d66e8 is 50, key is test_row_0/C:col10/1732130740990/Put/seqid=0 2024-11-20T19:25:42,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741955_1131 (size=12001) 2024-11-20T19:25:42,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130802568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:42,582 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130802569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:42,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130802581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:42,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130802585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:42,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130802569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:42,634 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/ebc251d35b714cc4b6949cf5d37d66e8 2024-11-20T19:25:42,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/01d98db437944a37b30ed0557f306a67 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/01d98db437944a37b30ed0557f306a67 2024-11-20T19:25:42,650 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/01d98db437944a37b30ed0557f306a67, entries=150, sequenceid=53, filesize=30.2 K 2024-11-20T19:25:42,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/5318d971096c4d5695b447802adeb277 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/5318d971096c4d5695b447802adeb277 2024-11-20T19:25:42,662 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/5318d971096c4d5695b447802adeb277, entries=150, sequenceid=53, filesize=11.7 K 2024-11-20T19:25:42,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/ebc251d35b714cc4b6949cf5d37d66e8 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/ebc251d35b714cc4b6949cf5d37d66e8 2024-11-20T19:25:42,672 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/ebc251d35b714cc4b6949cf5d37d66e8, entries=150, sequenceid=53, filesize=11.7 K 2024-11-20T19:25:42,674 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=161.02 KB/164880 for 550c6d15b8cc28d8b0f43501c9366c37 in 1472ms, sequenceid=53, compaction requested=true 2024-11-20T19:25:42,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:42,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:42,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46 2024-11-20T19:25:42,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=46 2024-11-20T19:25:42,677 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-11-20T19:25:42,677 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0900 sec 2024-11-20T19:25:42,679 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 2.0940 sec 2024-11-20T19:25:42,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-20T19:25:42,692 INFO [Thread-610 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed 2024-11-20T19:25:42,694 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:42,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-11-20T19:25:42,697 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:42,697 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T19:25:42,698 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:42,698 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:42,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T19:25:42,850 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:42,851 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T19:25:42,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:42,852 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 550c6d15b8cc28d8b0f43501c9366c37 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T19:25:42,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=A 2024-11-20T19:25:42,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:42,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=B 2024-11-20T19:25:42,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:42,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=C 2024-11-20T19:25:42,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:42,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112061dc8e6cec994d3f9eaa6b67b7af7a6e_550c6d15b8cc28d8b0f43501c9366c37 is 50, key is test_row_0/A:col10/1732130741418/Put/seqid=0 2024-11-20T19:25:42,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741956_1132 
(size=12154) 2024-11-20T19:25:42,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:42,928 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112061dc8e6cec994d3f9eaa6b67b7af7a6e_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112061dc8e6cec994d3f9eaa6b67b7af7a6e_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:42,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/dc898803156e497f83539a409f4d4a8f, store: [table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:42,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/dc898803156e497f83539a409f4d4a8f is 175, key is test_row_0/A:col10/1732130741418/Put/seqid=0 2024-11-20T19:25:42,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741957_1133 (size=30955) 2024-11-20T19:25:43,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T19:25:43,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T19:25:43,371 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=80, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/dc898803156e497f83539a409f4d4a8f 2024-11-20T19:25:43,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/8800a7ee82b24d27b0652ef87a0cf764 is 50, key is test_row_0/B:col10/1732130741418/Put/seqid=0 2024-11-20T19:25:43,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741958_1134 (size=12001) 2024-11-20T19:25:43,461 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at 
sequenceid=80 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/8800a7ee82b24d27b0652ef87a0cf764 2024-11-20T19:25:43,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/8ae3efd97e704d05ad00b7009d6114f2 is 50, key is test_row_0/C:col10/1732130741418/Put/seqid=0 2024-11-20T19:25:43,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741959_1135 (size=12001) 2024-11-20T19:25:43,540 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/8ae3efd97e704d05ad00b7009d6114f2 2024-11-20T19:25:43,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/dc898803156e497f83539a409f4d4a8f as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/dc898803156e497f83539a409f4d4a8f 2024-11-20T19:25:43,555 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/dc898803156e497f83539a409f4d4a8f, entries=150, sequenceid=80, filesize=30.2 K 2024-11-20T19:25:43,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/8800a7ee82b24d27b0652ef87a0cf764 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/8800a7ee82b24d27b0652ef87a0cf764 2024-11-20T19:25:43,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,566 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/8800a7ee82b24d27b0652ef87a0cf764, entries=150, sequenceid=80, filesize=11.7 K 2024-11-20T19:25:43,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/8ae3efd97e704d05ad00b7009d6114f2 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/8ae3efd97e704d05ad00b7009d6114f2 2024-11-20T19:25:43,575 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/8ae3efd97e704d05ad00b7009d6114f2, entries=150, sequenceid=80, filesize=11.7 K 2024-11-20T19:25:43,576 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=0 B/0 for 550c6d15b8cc28d8b0f43501c9366c37 in 724ms, sequenceid=80, compaction requested=true 2024-11-20T19:25:43,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:43,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
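[Annotation] The RegionTooBusyException warnings above are the region server rejecting Mutate calls while the memstore of region 550c6d15b8cc28d8b0f43501c9366c37 sits over its 512.0 K blocking limit (typically hbase.hregion.memstore.flush.size × hbase.hregion.memstore.block.multiplier); the FlushRegionProcedure runs tracked as pid=46 and pid=48 drain it. Below is a minimal, hypothetical client-side sketch of the same write path with an explicit retry on that exception. Table, row, and column names are taken from the log; the value, class name, and backoff numbers are illustrative assumptions, and the stock HBase client already retries RegionTooBusyException internally, so this is only a sketch of the behaviour, not the test's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionTooBusyRetryExample {   // hypothetical example class, not part of the test
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Same shape of write as the Mutate RPCs in the log: row test_row_0, family A, qualifier col10.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value")); // value is made up
      long backoffMs = 100;                                  // assumed starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                                    // region accepts the write
          break;
        } catch (RegionTooBusyException e) {
          // Memstore over its blocking limit; wait for the in-flight flush and retry.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}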
2024-11-20T19:25:43,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-11-20T19:25:43,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-11-20T19:25:43,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,581 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-11-20T19:25:43,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,581 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 880 msec 2024-11-20T19:25:43,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,583 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 887 msec 2024-11-20T19:25:43,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:25:43,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:25:43,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:25:43,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:25:43,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:25:43,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:25:43,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:25:43,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:25:43,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47
2024-11-20T19:25:43,803 INFO [Thread-610 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed
2024-11-20T19:25:43,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:25:43,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:25:43,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:25:43,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:25:43,806 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-20T19:25:43,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:25:43,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:25:43,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:25:43,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees
2024-11-20T19:25:43,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:25:43,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:25:43,808 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-20T19:25:43,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 550c6d15b8cc28d8b0f43501c9366c37
2024-11-20T19:25:43,808 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 550c6d15b8cc28d8b0f43501c9366c37 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-11-20T19:25:43,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:25:43,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=A
2024-11-20T19:25:43,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:25:43,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=B
2024-11-20T19:25:43,809 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T19:25:43,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:25:43,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=C
2024-11-20T19:25:43,809 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-20T19:25:43,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:43,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T19:25:43,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,825 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207840e21fe2004b519cd7c1ca79a57dcf_550c6d15b8cc28d8b0f43501c9366c37 is 50, key is test_row_0/A:col10/1732130743797/Put/seqid=0 2024-11-20T19:25:43,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,834 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,841 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,855 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,890 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741960_1136 (size=24358) 2024-11-20T19:25:43,898 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,905 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207840e21fe2004b519cd7c1ca79a57dcf_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207840e21fe2004b519cd7c1ca79a57dcf_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:43,907 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/a7c9fa42226d4c68a9163bf79da6a0d3, store: [table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:43,908 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/a7c9fa42226d4c68a9163bf79da6a0d3 is 175, key is test_row_0/A:col10/1732130743797/Put/seqid=0 2024-11-20T19:25:43,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741961_1137 (size=73995) 2024-11-20T19:25:43,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 
2024-11-20T19:25:43,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,961 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:43,962 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T19:25:43,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:43,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:43,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
2024-11-20T19:25:43,962 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:43,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:43,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:43,965 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:43,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130803947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:43,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:43,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130803949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:43,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:43,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130803949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:43,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:43,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130803951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:43,969 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:43,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130803952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:44,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130804067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:44,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,080 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130804068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:44,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130804069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:44,080 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130804070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:44,080 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130804072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:44,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T19:25:44,115 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:44,115 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T19:25:44,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:44,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:44,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:44,116 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:44,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:44,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:44,268 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:44,269 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T19:25:44,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:44,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:44,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:44,270 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:44,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:44,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:44,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130804281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:44,292 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130804282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:44,293 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130804282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:44,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130804283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:44,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130804301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:44,312 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=91, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/a7c9fa42226d4c68a9163bf79da6a0d3 2024-11-20T19:25:44,327 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/e8aba093c5ac42f2946fdcc386a9b797 is 50, key is test_row_0/B:col10/1732130743797/Put/seqid=0 2024-11-20T19:25:44,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741962_1138 (size=12001) 2024-11-20T19:25:44,384 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/e8aba093c5ac42f2946fdcc386a9b797 2024-11-20T19:25:44,403 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/0b72f2eb6ed845049e381c31bfc167cb is 50, key is test_row_0/C:col10/1732130743797/Put/seqid=0 2024-11-20T19:25:44,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T19:25:44,424 DEBUG [RSProcedureDispatcher-pool-1 {}] 
master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:44,424 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T19:25:44,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:44,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:44,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:44,425 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:44,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:44,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:44,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741963_1139 (size=12001) 2024-11-20T19:25:44,577 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:44,578 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T19:25:44,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
2024-11-20T19:25:44,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:44,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:44,578 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:44,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:44,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:44,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130804595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:44,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130804597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:44,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130804599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:44,612 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130804598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:44,619 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130804606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:44,730 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:44,731 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T19:25:44,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:44,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:44,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:44,731 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:44,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:44,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:44,857 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/0b72f2eb6ed845049e381c31bfc167cb 2024-11-20T19:25:44,864 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/a7c9fa42226d4c68a9163bf79da6a0d3 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/a7c9fa42226d4c68a9163bf79da6a0d3 2024-11-20T19:25:44,880 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/a7c9fa42226d4c68a9163bf79da6a0d3, entries=400, sequenceid=91, filesize=72.3 K 2024-11-20T19:25:44,881 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/e8aba093c5ac42f2946fdcc386a9b797 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/e8aba093c5ac42f2946fdcc386a9b797 2024-11-20T19:25:44,884 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:44,885 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T19:25:44,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:44,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:44,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:44,885 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:44,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:44,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:44,891 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/e8aba093c5ac42f2946fdcc386a9b797, entries=150, sequenceid=91, filesize=11.7 K 2024-11-20T19:25:44,893 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/0b72f2eb6ed845049e381c31bfc167cb as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/0b72f2eb6ed845049e381c31bfc167cb 2024-11-20T19:25:44,901 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/0b72f2eb6ed845049e381c31bfc167cb, entries=150, sequenceid=91, filesize=11.7 K 2024-11-20T19:25:44,902 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 550c6d15b8cc28d8b0f43501c9366c37 in 1094ms, sequenceid=91, compaction requested=true 2024-11-20T19:25:44,902 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:44,902 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-20T19:25:44,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 550c6d15b8cc28d8b0f43501c9366c37:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:44,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:44,903 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-20T19:25:44,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 550c6d15b8cc28d8b0f43501c9366c37:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:44,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:44,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 550c6d15b8cc28d8b0f43501c9366c37:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:44,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:44,905 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 197815 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-20T19:25:44,905 DEBUG 
[RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 550c6d15b8cc28d8b0f43501c9366c37/A is initiating minor compaction (all files) 2024-11-20T19:25:44,905 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 550c6d15b8cc28d8b0f43501c9366c37/A in TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:44,906 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/5d093c1eef174910b977a6c15abd3cd0, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/49b85e0e5b0546ef92f23f93e4b08d91, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/01d98db437944a37b30ed0557f306a67, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/dc898803156e497f83539a409f4d4a8f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/a7c9fa42226d4c68a9163bf79da6a0d3] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp, totalSize=193.2 K 2024-11-20T19:25:44,906 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=11 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:44,906 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
files: [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/5d093c1eef174910b977a6c15abd3cd0, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/49b85e0e5b0546ef92f23f93e4b08d91, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/01d98db437944a37b30ed0557f306a67, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/dc898803156e497f83539a409f4d4a8f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/a7c9fa42226d4c68a9163bf79da6a0d3] 2024-11-20T19:25:44,906 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60005 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-20T19:25:44,906 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 550c6d15b8cc28d8b0f43501c9366c37/B is initiating minor compaction (all files) 2024-11-20T19:25:44,906 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 550c6d15b8cc28d8b0f43501c9366c37/B in TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:44,907 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/61e5c3384e2c4311b10201aeb373fae1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/846829718ab44cf7a862b39d8e338902, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/5318d971096c4d5695b447802adeb277, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/8800a7ee82b24d27b0652ef87a0cf764, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/e8aba093c5ac42f2946fdcc386a9b797] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp, totalSize=58.6 K 2024-11-20T19:25:44,907 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d093c1eef174910b977a6c15abd3cd0, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732130740601 2024-11-20T19:25:44,908 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 61e5c3384e2c4311b10201aeb373fae1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732130740601 2024-11-20T19:25:44,909 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 49b85e0e5b0546ef92f23f93e4b08d91, keycount=150, bloomtype=ROW, size=30.2 
K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1732130740660 2024-11-20T19:25:44,909 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 846829718ab44cf7a862b39d8e338902, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1732130740660 2024-11-20T19:25:44,910 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 01d98db437944a37b30ed0557f306a67, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732130740987 2024-11-20T19:25:44,910 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 5318d971096c4d5695b447802adeb277, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732130740987 2024-11-20T19:25:44,910 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting dc898803156e497f83539a409f4d4a8f, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732130741412 2024-11-20T19:25:44,910 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 8800a7ee82b24d27b0652ef87a0cf764, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732130741412 2024-11-20T19:25:44,911 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting a7c9fa42226d4c68a9163bf79da6a0d3, keycount=400, bloomtype=ROW, size=72.3 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732130743611 2024-11-20T19:25:44,911 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting e8aba093c5ac42f2946fdcc386a9b797, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732130743682 2024-11-20T19:25:44,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T19:25:44,939 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:44,941 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 550c6d15b8cc28d8b0f43501c9366c37#B#compaction#117 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:44,943 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/78a66d65bf424f7d8de284f0c93b285d is 50, key is test_row_0/B:col10/1732130743797/Put/seqid=0 2024-11-20T19:25:44,956 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120531860fc74c64ed58bad4106028651ef_550c6d15b8cc28d8b0f43501c9366c37 store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:44,969 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120531860fc74c64ed58bad4106028651ef_550c6d15b8cc28d8b0f43501c9366c37, store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:44,970 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120531860fc74c64ed58bad4106028651ef_550c6d15b8cc28d8b0f43501c9366c37 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:44,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741964_1140 (size=12173) 2024-11-20T19:25:45,039 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:45,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741965_1141 (size=4469) 2024-11-20T19:25:45,039 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T19:25:45,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
2024-11-20T19:25:45,040 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 550c6d15b8cc28d8b0f43501c9366c37 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:25:45,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=A 2024-11-20T19:25:45,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:45,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=B 2024-11-20T19:25:45,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:45,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=C 2024-11-20T19:25:45,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:45,040 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 550c6d15b8cc28d8b0f43501c9366c37#A#compaction#118 average throughput is 0.24 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:45,042 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/2aef246337d042709b36993f07ddba1c is 175, key is test_row_0/A:col10/1732130743797/Put/seqid=0 2024-11-20T19:25:45,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120bfeef67a2ade4dac8d63d4f37773b2ce_550c6d15b8cc28d8b0f43501c9366c37 is 50, key is test_row_0/A:col10/1732130743949/Put/seqid=0 2024-11-20T19:25:45,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741966_1142 (size=31127) 2024-11-20T19:25:45,120 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
as already flushing 2024-11-20T19:25:45,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:45,126 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/2aef246337d042709b36993f07ddba1c as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/2aef246337d042709b36993f07ddba1c 2024-11-20T19:25:45,134 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 550c6d15b8cc28d8b0f43501c9366c37/A of 550c6d15b8cc28d8b0f43501c9366c37 into 2aef246337d042709b36993f07ddba1c(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:45,134 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:45,135 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., storeName=550c6d15b8cc28d8b0f43501c9366c37/A, priority=11, startTime=1732130744902; duration=0sec 2024-11-20T19:25:45,135 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:45,135 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 550c6d15b8cc28d8b0f43501c9366c37:A 2024-11-20T19:25:45,135 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-20T19:25:45,137 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60005 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-20T19:25:45,137 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 550c6d15b8cc28d8b0f43501c9366c37/C is initiating minor compaction (all files) 2024-11-20T19:25:45,137 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 550c6d15b8cc28d8b0f43501c9366c37/C in TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
2024-11-20T19:25:45,138 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/a394d14ae34548d69b1a7ff7ecee0a15, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/bd6d7d8da3294057831b11b202c522da, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/ebc251d35b714cc4b6949cf5d37d66e8, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/8ae3efd97e704d05ad00b7009d6114f2, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/0b72f2eb6ed845049e381c31bfc167cb] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp, totalSize=58.6 K 2024-11-20T19:25:45,138 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting a394d14ae34548d69b1a7ff7ecee0a15, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732130740601 2024-11-20T19:25:45,138 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting bd6d7d8da3294057831b11b202c522da, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1732130740660 2024-11-20T19:25:45,140 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting ebc251d35b714cc4b6949cf5d37d66e8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732130740987 2024-11-20T19:25:45,140 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ae3efd97e704d05ad00b7009d6114f2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732130741412 2024-11-20T19:25:45,141 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0b72f2eb6ed845049e381c31bfc167cb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732130743682 2024-11-20T19:25:45,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741967_1143 (size=12154) 2024-11-20T19:25:45,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:45,159 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120bfeef67a2ade4dac8d63d4f37773b2ce_550c6d15b8cc28d8b0f43501c9366c37 to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bfeef67a2ade4dac8d63d4f37773b2ce_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:45,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130805146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:45,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130805146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:45,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130805148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:45,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130805149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:45,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/47d19ad24c824e92ad681319632f7d04, store: [table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:45,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/47d19ad24c824e92ad681319632f7d04 is 175, key is test_row_0/A:col10/1732130743949/Put/seqid=0 2024-11-20T19:25:45,162 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130805152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:45,165 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 550c6d15b8cc28d8b0f43501c9366c37#C#compaction#120 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:45,165 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/118831bfd2754c93b2c25a877cdbce06 is 50, key is test_row_0/C:col10/1732130743797/Put/seqid=0 2024-11-20T19:25:45,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741968_1144 (size=30955) 2024-11-20T19:25:45,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741969_1145 (size=12173) 2024-11-20T19:25:45,237 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/118831bfd2754c93b2c25a877cdbce06 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/118831bfd2754c93b2c25a877cdbce06 2024-11-20T19:25:45,244 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 550c6d15b8cc28d8b0f43501c9366c37/C of 550c6d15b8cc28d8b0f43501c9366c37 into 118831bfd2754c93b2c25a877cdbce06(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
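[Editor's note] The repeated RegionTooBusyException entries ("Over memstore limit=512.0 K") come from HRegion.checkResources, which rejects writes once a region's memstore grows past its blocking threshold. That threshold is derived from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, and the unusually small 512 K value suggests the test deliberately configures a tiny flush size to force this code path. The fragment below is a hedged illustration of how a comparable limit could be set up in client-side configuration for a local experiment; the concrete values are assumptions for illustration, not read from this run's configuration files.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical test-style settings: a 128 KB flush size with the default multiplier of 4
        // yields a 512 KB blocking limit, matching the "Over memstore limit=512.0 K" messages above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = flushSize * multiplier;
        System.out.println("Writes to a region block above ~" + (blockingLimit / 1024) + " KB of memstore data");
      }
    }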
2024-11-20T19:25:45,245 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:45,245 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., storeName=550c6d15b8cc28d8b0f43501c9366c37/C, priority=11, startTime=1732130744903; duration=0sec 2024-11-20T19:25:45,245 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:45,245 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 550c6d15b8cc28d8b0f43501c9366c37:C 2024-11-20T19:25:45,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130805261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:45,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130805261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:45,268 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130805262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:45,278 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130805272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:45,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130805273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:45,398 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/78a66d65bf424f7d8de284f0c93b285d as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/78a66d65bf424f7d8de284f0c93b285d 2024-11-20T19:25:45,413 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 550c6d15b8cc28d8b0f43501c9366c37/B of 550c6d15b8cc28d8b0f43501c9366c37 into 78a66d65bf424f7d8de284f0c93b285d(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:45,413 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:45,413 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., storeName=550c6d15b8cc28d8b0f43501c9366c37/B, priority=11, startTime=1732130744903; duration=0sec 2024-11-20T19:25:45,414 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:45,414 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 550c6d15b8cc28d8b0f43501c9366c37:B 2024-11-20T19:25:45,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130805465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:45,475 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130805466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:45,477 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130805470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:45,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130805483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:45,498 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130805493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:45,617 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=116, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/47d19ad24c824e92ad681319632f7d04 2024-11-20T19:25:45,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/c8fa962569904e9092e0825ae124b76a is 50, key is test_row_0/B:col10/1732130743949/Put/seqid=0 2024-11-20T19:25:45,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741970_1146 (size=12001) 2024-11-20T19:25:45,697 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/c8fa962569904e9092e0825ae124b76a 2024-11-20T19:25:45,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/e094a78938ac4c11af0ac8f066c4b4ee is 50, key is 
test_row_0/C:col10/1732130743949/Put/seqid=0 2024-11-20T19:25:45,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741971_1147 (size=12001) 2024-11-20T19:25:45,783 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/e094a78938ac4c11af0ac8f066c4b4ee 2024-11-20T19:25:45,785 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130805778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:45,786 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130805779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:45,786 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130805779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:45,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/47d19ad24c824e92ad681319632f7d04 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/47d19ad24c824e92ad681319632f7d04 2024-11-20T19:25:45,795 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/47d19ad24c824e92ad681319632f7d04, entries=150, sequenceid=116, filesize=30.2 K 2024-11-20T19:25:45,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/c8fa962569904e9092e0825ae124b76a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/c8fa962569904e9092e0825ae124b76a 2024-11-20T19:25:45,797 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130805791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:45,805 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/c8fa962569904e9092e0825ae124b76a, entries=150, sequenceid=116, filesize=11.7 K 2024-11-20T19:25:45,805 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130805803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:45,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/e094a78938ac4c11af0ac8f066c4b4ee as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/e094a78938ac4c11af0ac8f066c4b4ee 2024-11-20T19:25:45,813 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/e094a78938ac4c11af0ac8f066c4b4ee, entries=150, sequenceid=116, filesize=11.7 K 2024-11-20T19:25:45,821 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 550c6d15b8cc28d8b0f43501c9366c37 in 781ms, sequenceid=116, compaction requested=false 2024-11-20T19:25:45,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:45,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
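[Editor's note] Throughout this stretch the RPC handlers keep rejecting Mutate calls with RegionTooBusyException while the flush catches up; the expectation is that writers back off and retry rather than treat the condition as fatal, and the exception typically reaches client code wrapped in an IOException by the client retry machinery. The following is a minimal, hypothetical sketch of such a retry loop around a single Put, assuming the standard HBase 2.x client API; the retry count and sleep times are illustrative only.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPut {
      // Retries a put with exponential backoff when the write keeps failing,
      // e.g. because the region reports RegionTooBusyException as seen in the log above.
      static void putWithBackoff(Connection connection, Put put) throws IOException, InterruptedException {
        try (Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          long sleepMs = 100;
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);
              return;
            } catch (IOException e) {
              // Give the region server time to flush and compact before trying again.
              Thread.sleep(sleepMs);
              sleepMs = Math.min(sleepMs * 2, 5_000);
            }
          }
          throw new IOException("put still failing after retries");
        }
      }

      public static void main(String[] args) {
        // Row and column names mirror the test data in the log (test_row_0, family A, col10).
        Put put = new Put(Bytes.toBytes("test_row_0"))
            .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
        // putWithBackoff(connection, put) would be invoked with an open Connection.
      }
    }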
2024-11-20T19:25:45,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-11-20T19:25:45,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-11-20T19:25:45,824 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-11-20T19:25:45,824 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0140 sec 2024-11-20T19:25:45,826 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 2.0190 sec 2024-11-20T19:25:45,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T19:25:45,915 INFO [Thread-610 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-11-20T19:25:45,917 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:45,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-11-20T19:25:45,918 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:45,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T19:25:45,919 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:45,919 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:46,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T19:25:46,072 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:46,073 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T19:25:46,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
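[Editor's note] The entries above show the master turning a client flush request ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees") into a FlushTableProcedure (pid=49 just completed, pid=51 started) and dispatching a FlushRegionProcedure to the region server, with the admin client waiting on the table future until "procId: 49 completed". As a point of reference, the snippet below is a small, hedged example of what that client-side call might look like with the standard HBase 2.x Admin API; cluster configuration is again assumed to come from hbase-site.xml.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Returns once the master-side flush procedure chain
          // (FlushTableProcedure -> FlushRegionProcedure) has finished, as seen in the log above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }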
2024-11-20T19:25:46,073 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 550c6d15b8cc28d8b0f43501c9366c37 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:25:46,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=A 2024-11-20T19:25:46,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:46,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=B 2024-11-20T19:25:46,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:46,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=C 2024-11-20T19:25:46,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:46,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112034229c6346724f5a81ecbd2b601d6f60_550c6d15b8cc28d8b0f43501c9366c37 is 50, key is test_row_0/A:col10/1732130745143/Put/seqid=0 2024-11-20T19:25:46,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741972_1148 (size=12154) 2024-11-20T19:25:46,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T19:25:46,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:46,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:46,367 INFO [master/db9c3a6c6492:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-20T19:25:46,367 INFO [master/db9c3a6c6492:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-20T19:25:46,379 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130806367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:46,379 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130806373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:46,385 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130806375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:46,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130806378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:46,390 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130806379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:46,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130806482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:46,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130806483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:46,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130806488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:46,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130806489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:46,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130806492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:46,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T19:25:46,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:46,557 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112034229c6346724f5a81ecbd2b601d6f60_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112034229c6346724f5a81ecbd2b601d6f60_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:46,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/7645bcf826004b19bd2ab4eafd4ec161, store: [table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:46,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/7645bcf826004b19bd2ab4eafd4ec161 is 175, key is test_row_0/A:col10/1732130745143/Put/seqid=0 2024-11-20T19:25:46,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741973_1149 (size=30955) 2024-11-20T19:25:46,623 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=130, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/7645bcf826004b19bd2ab4eafd4ec161 
2024-11-20T19:25:46,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/5e8a9c436db34b498f3a481a6b68ecd3 is 50, key is test_row_0/B:col10/1732130745143/Put/seqid=0 2024-11-20T19:25:46,691 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130806689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:46,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130806692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:46,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741974_1150 (size=12001) 2024-11-20T19:25:46,708 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/5e8a9c436db34b498f3a481a6b68ecd3 2024-11-20T19:25:46,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130806706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:46,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130806708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:46,711 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130806708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:46,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/8f7683f20c39475681998015b40a5164 is 50, key is test_row_0/C:col10/1732130745143/Put/seqid=0 2024-11-20T19:25:46,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741975_1151 (size=12001) 2024-11-20T19:25:46,753 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/8f7683f20c39475681998015b40a5164 2024-11-20T19:25:46,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/7645bcf826004b19bd2ab4eafd4ec161 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/7645bcf826004b19bd2ab4eafd4ec161 2024-11-20T19:25:46,770 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/7645bcf826004b19bd2ab4eafd4ec161, entries=150, sequenceid=130, filesize=30.2 K 2024-11-20T19:25:46,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/5e8a9c436db34b498f3a481a6b68ecd3 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/5e8a9c436db34b498f3a481a6b68ecd3 2024-11-20T19:25:46,779 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/5e8a9c436db34b498f3a481a6b68ecd3, entries=150, sequenceid=130, filesize=11.7 K 2024-11-20T19:25:46,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/8f7683f20c39475681998015b40a5164 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/8f7683f20c39475681998015b40a5164 2024-11-20T19:25:46,789 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/8f7683f20c39475681998015b40a5164, entries=150, sequenceid=130, filesize=11.7 K 2024-11-20T19:25:46,790 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 550c6d15b8cc28d8b0f43501c9366c37 in 717ms, sequenceid=130, compaction requested=true 2024-11-20T19:25:46,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:46,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
2024-11-20T19:25:46,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-20T19:25:46,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-11-20T19:25:46,794 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-11-20T19:25:46,795 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 874 msec 2024-11-20T19:25:46,796 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 878 msec 2024-11-20T19:25:46,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:46,996 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 550c6d15b8cc28d8b0f43501c9366c37 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T19:25:46,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=A 2024-11-20T19:25:46,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:46,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=B 2024-11-20T19:25:46,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:46,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=C 2024-11-20T19:25:46,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:47,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T19:25:47,023 INFO [Thread-610 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-11-20T19:25:47,024 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a5666da050ba4793b83ec009e252dfec_550c6d15b8cc28d8b0f43501c9366c37 is 50, key is test_row_0/A:col10/1732130746376/Put/seqid=0 2024-11-20T19:25:47,026 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:47,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-11-20T19:25:47,028 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
2024-11-20T19:25:47,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T19:25:47,029 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:47,029 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:47,031 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130807015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:47,032 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130807028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:47,033 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130807029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:47,033 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130807031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:47,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130807032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:47,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741976_1152 (size=14794) 2024-11-20T19:25:47,075 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:47,083 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a5666da050ba4793b83ec009e252dfec_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a5666da050ba4793b83ec009e252dfec_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:47,084 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/920060d388b24b6895e745cc764c1933, store: [table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:47,085 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/920060d388b24b6895e745cc764c1933 is 175, key is test_row_0/A:col10/1732130746376/Put/seqid=0 2024-11-20T19:25:47,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T19:25:47,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741977_1153 (size=39749) 2024-11-20T19:25:47,139 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130807134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:47,139 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130807135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:47,139 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130807136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:47,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130807136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:47,181 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:47,182 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-20T19:25:47,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:47,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:47,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:47,183 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:47,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:47,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:47,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T19:25:47,336 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:47,336 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-20T19:25:47,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:47,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:47,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:47,337 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:47,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:47,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:47,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130807341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:47,345 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130807341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:47,345 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130807341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:47,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130807343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:47,489 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:47,489 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-20T19:25:47,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:47,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:47,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:47,490 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:47,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:47,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:47,536 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=156, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/920060d388b24b6895e745cc764c1933 2024-11-20T19:25:47,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130807534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:47,564 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/a2e41f52e23e4255b2314d8d794f2463 is 50, key is test_row_0/B:col10/1732130746376/Put/seqid=0 2024-11-20T19:25:47,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741978_1154 (size=12151) 2024-11-20T19:25:47,589 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/a2e41f52e23e4255b2314d8d794f2463 2024-11-20T19:25:47,610 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/a56647faf65c4cfdb5936e7c5a36ff55 is 50, key is test_row_0/C:col10/1732130746376/Put/seqid=0 2024-11-20T19:25:47,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T19:25:47,643 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:47,643 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-20T19:25:47,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:47,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:47,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
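Annotation: "Over memstore limit=512.0 K" is the region's blocking threshold, which HBase derives from the per-region memstore flush size multiplied by the block multiplier; this test is evidently running with a very small flush size, so the limit is reached almost immediately. A minimal configuration sketch of those two settings follows; the 128 KB value is an assumption chosen only because 128 KB x 4 reproduces the 512 K figure above, it is not read from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches this size (the production default is 128 MB, not 128 KB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Block new writes once the memstore exceeds flush.size * multiplier;
        // that is the point where HRegion.checkResources throws RegionTooBusyException.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long limit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking limit = " + limit / 1024 + " K");   // prints 512 K
    }
}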
2024-11-20T19:25:47,644 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:47,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:47,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:47,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130807647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:47,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130807648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:47,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130807648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:47,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130807652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:47,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741979_1155 (size=12151) 2024-11-20T19:25:47,665 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/a56647faf65c4cfdb5936e7c5a36ff55 2024-11-20T19:25:47,676 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/920060d388b24b6895e745cc764c1933 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/920060d388b24b6895e745cc764c1933 2024-11-20T19:25:47,682 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/920060d388b24b6895e745cc764c1933, entries=200, sequenceid=156, filesize=38.8 K 2024-11-20T19:25:47,685 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/a2e41f52e23e4255b2314d8d794f2463 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/a2e41f52e23e4255b2314d8d794f2463 2024-11-20T19:25:47,698 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/a2e41f52e23e4255b2314d8d794f2463, entries=150, sequenceid=156, filesize=11.9 K 2024-11-20T19:25:47,701 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/a56647faf65c4cfdb5936e7c5a36ff55 as 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/a56647faf65c4cfdb5936e7c5a36ff55 2024-11-20T19:25:47,707 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/a56647faf65c4cfdb5936e7c5a36ff55, entries=150, sequenceid=156, filesize=11.9 K 2024-11-20T19:25:47,709 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 550c6d15b8cc28d8b0f43501c9366c37 in 713ms, sequenceid=156, compaction requested=true 2024-11-20T19:25:47,709 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:47,709 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:47,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 550c6d15b8cc28d8b0f43501c9366c37:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:47,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:47,710 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:47,711 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 132786 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:47,712 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 550c6d15b8cc28d8b0f43501c9366c37/A is initiating minor compaction (all files) 2024-11-20T19:25:47,712 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 550c6d15b8cc28d8b0f43501c9366c37/A in TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
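Annotation: once the flush commits a fourth file per store, CompactSplit queues compactions and the ExploringCompactionPolicy selects all four eligible files ("4 eligible, 16 blocking" in the selection lines above). The knobs behind that selection are store-level settings; the sketch below uses the commonly documented defaults purely for illustration, they were not read from this test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionConfig {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of store files before a minor compaction is considered.
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Maximum number of files a single minor compaction may pick up.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Updates are delayed once a store accumulates this many files
        // (the "16 blocking" reported by SortedCompactionPolicy above).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    }
}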
2024-11-20T19:25:47,712 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/2aef246337d042709b36993f07ddba1c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/47d19ad24c824e92ad681319632f7d04, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/7645bcf826004b19bd2ab4eafd4ec161, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/920060d388b24b6895e745cc764c1933] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp, totalSize=129.7 K 2024-11-20T19:25:47,712 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:47,712 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. files: [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/2aef246337d042709b36993f07ddba1c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/47d19ad24c824e92ad681319632f7d04, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/7645bcf826004b19bd2ab4eafd4ec161, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/920060d388b24b6895e745cc764c1933] 2024-11-20T19:25:47,713 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48326 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:47,713 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 550c6d15b8cc28d8b0f43501c9366c37/B is initiating minor compaction (all files) 2024-11-20T19:25:47,713 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 550c6d15b8cc28d8b0f43501c9366c37/B in TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
2024-11-20T19:25:47,713 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/78a66d65bf424f7d8de284f0c93b285d, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/c8fa962569904e9092e0825ae124b76a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/5e8a9c436db34b498f3a481a6b68ecd3, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/a2e41f52e23e4255b2314d8d794f2463] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp, totalSize=47.2 K 2024-11-20T19:25:47,714 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2aef246337d042709b36993f07ddba1c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732130743682 2024-11-20T19:25:47,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 550c6d15b8cc28d8b0f43501c9366c37:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:47,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:47,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 550c6d15b8cc28d8b0f43501c9366c37:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:47,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:47,715 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 78a66d65bf424f7d8de284f0c93b285d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732130743682 2024-11-20T19:25:47,715 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting c8fa962569904e9092e0825ae124b76a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732130743947 2024-11-20T19:25:47,715 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 47d19ad24c824e92ad681319632f7d04, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732130743947 2024-11-20T19:25:47,716 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e8a9c436db34b498f3a481a6b68ecd3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732130745121 2024-11-20T19:25:47,723 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7645bcf826004b19bd2ab4eafd4ec161, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732130745121 2024-11-20T19:25:47,725 DEBUG 
[RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting a2e41f52e23e4255b2314d8d794f2463, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732130746351 2024-11-20T19:25:47,725 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 920060d388b24b6895e745cc764c1933, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732130746351 2024-11-20T19:25:47,759 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 550c6d15b8cc28d8b0f43501c9366c37#B#compaction#129 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:47,759 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/9bfdfd35d3034e56981f19eb967c8d20 is 50, key is test_row_0/B:col10/1732130746376/Put/seqid=0 2024-11-20T19:25:47,766 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:47,797 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:47,798 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-20T19:25:47,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
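Annotation: the master keeps re-dispatching FlushRegionCallable (pid=54) until the region is no longer mid-flush; in the records that follow, the retried procedure finally flushes all three column families. The same flush, and the compaction of family A that the region server otherwise schedules on its own, can also be requested through the Admin API. A minimal sketch, assuming a reachable cluster, with the table and family names taken from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestFlushAndCompaction {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Flush every region of the table, the same work the FlushRegionCallable procedure retries above.
            admin.flush(table);
            // Request a minor compaction of column family A, which the server would
            // otherwise schedule itself once enough store files accumulate.
            admin.compact(table, Bytes.toBytes("A"));
        }
    }
}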
2024-11-20T19:25:47,798 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 550c6d15b8cc28d8b0f43501c9366c37 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T19:25:47,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=A 2024-11-20T19:25:47,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:47,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=B 2024-11-20T19:25:47,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:47,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=C 2024-11-20T19:25:47,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:47,804 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112014bace5b2c4444e7a6aa89a2f4999eae_550c6d15b8cc28d8b0f43501c9366c37 store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:47,807 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112014bace5b2c4444e7a6aa89a2f4999eae_550c6d15b8cc28d8b0f43501c9366c37, store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:47,807 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112014bace5b2c4444e7a6aa89a2f4999eae_550c6d15b8cc28d8b0f43501c9366c37 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:47,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741980_1156 (size=12459) 2024-11-20T19:25:47,840 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/9bfdfd35d3034e56981f19eb967c8d20 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/9bfdfd35d3034e56981f19eb967c8d20 2024-11-20T19:25:47,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d6863164e4364075b2752f8edabae8b8_550c6d15b8cc28d8b0f43501c9366c37 is 50, key is test_row_0/A:col10/1732130747013/Put/seqid=0 2024-11-20T19:25:47,848 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 550c6d15b8cc28d8b0f43501c9366c37/B of 550c6d15b8cc28d8b0f43501c9366c37 into 9bfdfd35d3034e56981f19eb967c8d20(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:47,848 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:47,848 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., storeName=550c6d15b8cc28d8b0f43501c9366c37/B, priority=12, startTime=1732130747710; duration=0sec 2024-11-20T19:25:47,848 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:47,848 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 550c6d15b8cc28d8b0f43501c9366c37:B 2024-11-20T19:25:47,849 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:47,850 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48326 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:47,850 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 550c6d15b8cc28d8b0f43501c9366c37/C is initiating minor compaction (all files) 2024-11-20T19:25:47,850 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 550c6d15b8cc28d8b0f43501c9366c37/C in TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
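For context: the "Exploring compaction algorithm has selected 4 files of size 48326 ... with 3 in ratio" entry above refers to a size-ratio check over the candidate store files. The sketch below illustrates that check in isolation; it is not the HBase ExploringCompactionPolicy itself. Only the 48326-byte total comes from the log; the per-file sizes and the 1.2 ratio are assumed for illustration.

import java.util.List;

public class RatioCheck {
  // A file is "in ratio" if it is no larger than ratio * (combined size of the other candidates).
  static boolean inRatio(long fileSize, long totalSize, double ratio) {
    return fileSize <= ratio * (totalSize - fileSize);
  }

  public static void main(String[] args) {
    // Hypothetical per-file sizes that add up to the 48326 bytes reported above.
    List<Long> sizes = List.of(12_186L, 11_943L, 11_993L, 12_204L);
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    double ratio = 1.2; // assumed; the real value comes from compaction configuration
    for (long s : sizes) {
      System.out.printf("size=%d inRatio=%b%n", s, inRatio(s, total, ratio));
    }
  }
}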
2024-11-20T19:25:47,851 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/118831bfd2754c93b2c25a877cdbce06, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/e094a78938ac4c11af0ac8f066c4b4ee, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/8f7683f20c39475681998015b40a5164, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/a56647faf65c4cfdb5936e7c5a36ff55] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp, totalSize=47.2 K 2024-11-20T19:25:47,851 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 118831bfd2754c93b2c25a877cdbce06, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732130743682 2024-11-20T19:25:47,854 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting e094a78938ac4c11af0ac8f066c4b4ee, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732130743947 2024-11-20T19:25:47,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741981_1157 (size=4469) 2024-11-20T19:25:47,855 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f7683f20c39475681998015b40a5164, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732130745121 2024-11-20T19:25:47,856 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting a56647faf65c4cfdb5936e7c5a36ff55, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732130746351 2024-11-20T19:25:47,856 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 550c6d15b8cc28d8b0f43501c9366c37#A#compaction#130 average throughput is 0.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:47,857 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/ebc606cd9a904f268d193a1a936313fe is 175, key is test_row_0/A:col10/1732130746376/Put/seqid=0 2024-11-20T19:25:47,919 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 550c6d15b8cc28d8b0f43501c9366c37#C#compaction#132 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:47,920 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/971424db5efc4aa6b7a2c8e7b733ebcd is 50, key is test_row_0/C:col10/1732130746376/Put/seqid=0 2024-11-20T19:25:47,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741982_1158 (size=12304) 2024-11-20T19:25:47,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741983_1159 (size=31413) 2024-11-20T19:25:47,955 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/ebc606cd9a904f268d193a1a936313fe as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/ebc606cd9a904f268d193a1a936313fe 2024-11-20T19:25:47,962 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 550c6d15b8cc28d8b0f43501c9366c37/A of 550c6d15b8cc28d8b0f43501c9366c37 into ebc606cd9a904f268d193a1a936313fe(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:47,962 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:47,963 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., storeName=550c6d15b8cc28d8b0f43501c9366c37/A, priority=12, startTime=1732130747709; duration=0sec 2024-11-20T19:25:47,963 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:47,963 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 550c6d15b8cc28d8b0f43501c9366c37:A 2024-11-20T19:25:47,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741984_1160 (size=12459) 2024-11-20T19:25:47,992 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/971424db5efc4aa6b7a2c8e7b733ebcd as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/971424db5efc4aa6b7a2c8e7b733ebcd 2024-11-20T19:25:48,015 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 550c6d15b8cc28d8b0f43501c9366c37/C of 550c6d15b8cc28d8b0f43501c9366c37 into 971424db5efc4aa6b7a2c8e7b733ebcd(size=12.2 K), total size for store is 12.2 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:48,015 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:48,015 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., storeName=550c6d15b8cc28d8b0f43501c9366c37/C, priority=12, startTime=1732130747714; duration=0sec 2024-11-20T19:25:48,015 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:48,015 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 550c6d15b8cc28d8b0f43501c9366c37:C 2024-11-20T19:25:48,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T19:25:48,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:48,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:48,240 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130808231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:48,241 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130808232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:48,241 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130808233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:48,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130808241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:48,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,337 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d6863164e4364075b2752f8edabae8b8_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d6863164e4364075b2752f8edabae8b8_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:48,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/e3bd698e5c35478ab71a5455c3015426, store: [table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:48,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/e3bd698e5c35478ab71a5455c3015426 is 175, key is test_row_0/A:col10/1732130747013/Put/seqid=0 2024-11-20T19:25:48,346 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130808343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:48,355 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130808343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:48,356 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130808348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:48,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130808352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:48,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741985_1161 (size=31105) 2024-11-20T19:25:48,375 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=166, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/e3bd698e5c35478ab71a5455c3015426 2024-11-20T19:25:48,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/69e78ab4ed2f4f14bf5f47306c1212fc is 50, key is test_row_0/B:col10/1732130747013/Put/seqid=0 2024-11-20T19:25:48,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741986_1162 (size=12151) 2024-11-20T19:25:48,449 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/69e78ab4ed2f4f14bf5f47306c1212fc 2024-11-20T19:25:48,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/137da047d3634fa291acd79f279c8c7f is 50, key is test_row_0/C:col10/1732130747013/Put/seqid=0 2024-11-20T19:25:48,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741987_1163 (size=12151) 2024-11-20T19:25:48,510 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=166 (bloomFilter=true), 
to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/137da047d3634fa291acd79f279c8c7f 2024-11-20T19:25:48,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/e3bd698e5c35478ab71a5455c3015426 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/e3bd698e5c35478ab71a5455c3015426 2024-11-20T19:25:48,534 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/e3bd698e5c35478ab71a5455c3015426, entries=150, sequenceid=166, filesize=30.4 K 2024-11-20T19:25:48,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/69e78ab4ed2f4f14bf5f47306c1212fc as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/69e78ab4ed2f4f14bf5f47306c1212fc 2024-11-20T19:25:48,547 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/69e78ab4ed2f4f14bf5f47306c1212fc, entries=150, sequenceid=166, filesize=11.9 K 2024-11-20T19:25:48,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/137da047d3634fa291acd79f279c8c7f as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/137da047d3634fa291acd79f279c8c7f 2024-11-20T19:25:48,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130808548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:48,556 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/137da047d3634fa291acd79f279c8c7f, entries=150, sequenceid=166, filesize=11.9 K 2024-11-20T19:25:48,571 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=174.43 KB/178620 for 550c6d15b8cc28d8b0f43501c9366c37 in 773ms, sequenceid=166, compaction requested=false 2024-11-20T19:25:48,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:48,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
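For context: the RegionTooBusyException / "Over memstore limit=512.0 K" warnings around this flush are write back-pressure: mutations are rejected while the region's memstore is above its blocking limit, and they succeed again once a flush (here ~47 KB in 773ms) drains it. The sketch below shows an explicit retry-with-backoff around Table.put; it assumes the exception reaches the caller (depending on client retry settings it may arrive wrapped), and the table name, row, values, and backoff numbers are placeholders.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (IOException e) {
          // In this log the underlying cause is RegionTooBusyException ("Over memstore limit");
          // back off so the in-flight flush can bring the memstore under its limit.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 2_000);
        }
      }
    }
  }
}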
2024-11-20T19:25:48,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-20T19:25:48,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-11-20T19:25:48,572 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 550c6d15b8cc28d8b0f43501c9366c37 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-11-20T19:25:48,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:48,575 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-11-20T19:25:48,575 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5440 sec 2024-11-20T19:25:48,577 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 1.5490 sec 2024-11-20T19:25:48,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=A 2024-11-20T19:25:48,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:48,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=B 2024-11-20T19:25:48,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:48,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=C 2024-11-20T19:25:48,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:48,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130808583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:48,598 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130808591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:48,598 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130808592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:48,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130808591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:48,612 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a4dd3813b7e6427b9637d5ecd4d495c4_550c6d15b8cc28d8b0f43501c9366c37 is 50, key is test_row_0/A:col10/1732130748567/Put/seqid=0 2024-11-20T19:25:48,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741988_1164 (size=12304) 2024-11-20T19:25:48,637 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,642 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a4dd3813b7e6427b9637d5ecd4d495c4_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a4dd3813b7e6427b9637d5ecd4d495c4_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:48,644 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/5777e0148be04c01a9725d939063b8d2, store: [table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:48,644 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/5777e0148be04c01a9725d939063b8d2 is 175, key is test_row_0/A:col10/1732130748567/Put/seqid=0 2024-11-20T19:25:48,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741989_1165 (size=31105) 2024-11-20T19:25:48,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130808698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:48,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130808699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:48,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130808700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:48,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130808701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:48,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130808857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:48,907 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130808905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:48,909 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130808907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:48,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130808908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:48,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130808917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:49,090 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=200, memsize=62.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/5777e0148be04c01a9725d939063b8d2 2024-11-20T19:25:49,110 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/a88f3f3fd572402fb2360eacac1afdf0 is 50, key is test_row_0/B:col10/1732130748567/Put/seqid=0 2024-11-20T19:25:49,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T19:25:49,135 INFO [Thread-610 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-11-20T19:25:49,136 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:49,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-11-20T19:25:49,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T19:25:49,147 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:49,148 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute 
state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:49,148 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:49,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741990_1166 (size=12151) 2024-11-20T19:25:49,211 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:49,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130809209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:49,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:49,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130809213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:49,223 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:49,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130809219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:49,232 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:49,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130809232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:49,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T19:25:49,301 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:49,302 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T19:25:49,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:49,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:49,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:49,302 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:49,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130809362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:49,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T19:25:49,454 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:49,455 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T19:25:49,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
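Editor's note: the repeated RegionTooBusyException WARN/DEBUG pairs above all come from the same write-side guard. HRegion.checkResources() rejects mutations while the region's memstore sits above its blocking threshold, and the HBase client treats the exception as retryable, so the same connections keep reappearing with new callIds until the flush further down drains the memstore. In a stock configuration that threshold is roughly hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the 512.0 K limit here suggests the test deliberately runs with a very small flush size to force this path. Below is a minimal sketch of that guard, with illustrative names only; it is not the actual HRegion code.

```java
// Minimal sketch (assumed shape, not HBase's real HRegion internals) of the guard
// the RegionTooBusyException stack traces above point at via HRegion.checkResources().
import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

final class MemStoreWriteGuard {
    private final long blockingMemStoreSize;              // roughly flushSize * blockMultiplier
    private final AtomicLong memStoreDataSize = new AtomicLong();

    MemStoreWriteGuard(long flushSizeBytes, int blockMultiplier) {
        this.blockingMemStoreSize = flushSizeBytes * blockMultiplier;
    }

    /** Called before each mutation; rejects writes while the memstore is over the blocking limit. */
    void checkResources(String regionName, String serverName) throws IOException {
        if (memStoreDataSize.get() > blockingMemStoreSize) {
            // In the real server this is a RegionTooBusyException, which the client
            // retries -- hence the same connections reappearing above with new callIds
            // until the in-flight flush frees space.
            throw new IOException("Over memstore limit=" + blockingMemStoreSize
                + ", regionName=" + regionName + ", server=" + serverName);
        }
    }

    void accountWrite(long bytes) { memStoreDataSize.addAndGet(bytes); }
    void accountFlush(long bytes) { memStoreDataSize.addAndGet(-bytes); }
}
```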
2024-11-20T19:25:49,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:49,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:49,456 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:49,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
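Editor's note: the pid=56 FlushRegionProcedure keeps failing for the same reason on every attempt. The region is still in the middle of the earlier MemStoreFlusher flush, so FlushRegionCallable logs "NOT flushing ... as already flushing", throws IOException("Unable to complete flush ..."), the region server reports the failure, and the master records "Remote procedure failed, pid=56" and appears to simply dispatch the callable again a moment later; the loop ends once the ongoing flush completes. The sketch below only mirrors that shape as shown by the log; class and method names are hypothetical, not the real procedure framework.

```java
// Illustrative fail/re-dispatch loop for the pid=56 pattern above (hypothetical names).
import java.io.IOException;

final class FlushRegionTask {
    interface Region {
        boolean isFlushing();
        void flush() throws IOException;
        String describe();
    }

    /** One remote-procedure attempt on the region server side. */
    static void doCall(Region region) throws IOException {
        if (region.isFlushing()) {
            // Matches "NOT flushing ... as already flushing" followed by the
            // IOException the handler reports back to the master.
            throw new IOException("Unable to complete flush " + region.describe());
        }
        region.flush();
    }

    /** The master side re-dispatches the subprocedure until it succeeds. */
    static void runUntilDone(Region region, int maxAttempts, long backoffMillis) throws Exception {
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                doCall(region);
                return;
            } catch (IOException e) {
                // "Remote procedure failed, pid=..." then another dispatch shortly after.
                Thread.sleep(backoffMillis);
            }
        }
        throw new IOException("flush did not complete after " + maxAttempts + " attempts");
    }
}
```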
2024-11-20T19:25:49,549 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/a88f3f3fd572402fb2360eacac1afdf0 2024-11-20T19:25:49,574 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/222d9820455442f9951573bf6aba7b0f is 50, key is test_row_0/C:col10/1732130748567/Put/seqid=0 2024-11-20T19:25:49,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741991_1167 (size=12151) 2024-11-20T19:25:49,598 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/222d9820455442f9951573bf6aba7b0f 2024-11-20T19:25:49,608 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:49,609 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T19:25:49,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:49,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:49,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:49,610 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:49,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,622 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/5777e0148be04c01a9725d939063b8d2 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/5777e0148be04c01a9725d939063b8d2 2024-11-20T19:25:49,630 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/5777e0148be04c01a9725d939063b8d2, entries=150, sequenceid=200, filesize=30.4 K 2024-11-20T19:25:49,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/a88f3f3fd572402fb2360eacac1afdf0 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/a88f3f3fd572402fb2360eacac1afdf0 2024-11-20T19:25:49,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,644 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/a88f3f3fd572402fb2360eacac1afdf0, entries=150, sequenceid=200, filesize=11.9 K 2024-11-20T19:25:49,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/222d9820455442f9951573bf6aba7b0f as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/222d9820455442f9951573bf6aba7b0f 2024-11-20T19:25:49,655 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/222d9820455442f9951573bf6aba7b0f, entries=150, sequenceid=200, filesize=11.9 K 2024-11-20T19:25:49,656 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~187.85 KB/192360, heapSize ~492.89 
KB/504720, currentSize=20.13 KB/20610 for 550c6d15b8cc28d8b0f43501c9366c37 in 1084ms, sequenceid=200, compaction requested=true 2024-11-20T19:25:49,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:49,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 550c6d15b8cc28d8b0f43501c9366c37:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:49,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:49,657 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:49,657 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:49,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,660 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93623 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:49,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,660 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 550c6d15b8cc28d8b0f43501c9366c37/A is initiating minor compaction (all files) 2024-11-20T19:25:49,660 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 550c6d15b8cc28d8b0f43501c9366c37/A in TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
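Editor's note: the flush itself follows the two-step commit visible in the MemStoreFlusher entries above. Each family's data is first written to a file under the region's .tmp area, then moved into the family directory (A/, B/, C/) and registered with the store; once all three are committed, the region logs the flush summary and marks every store for compaction, which the CompactSplit and SortedCompactionPolicy entries then pick up. Below is a bare-bones sketch of the ".tmp then rename" step using the plain Hadoop FileSystem API; the path layout and helper name are illustrative, and HBase's own HRegionFileSystem.commitStoreFile does considerably more bookkeeping.

```java
// Minimal sketch of the ".tmp then commit" pattern the flush entries above follow.
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;

final class TmpThenCommit {
    /** Move a flushed file from the region's .tmp area into its column family directory. */
    static Path commitStoreFile(FileSystem fs, Path regionDir, String family, Path tmpFile)
            throws IOException {
        Path familyDir = new Path(regionDir, family);
        if (!fs.exists(familyDir) && !fs.mkdirs(familyDir)) {
            throw new IOException("could not create " + familyDir);
        }
        Path dst = new Path(familyDir, tmpFile.getName());
        // This rename is what the "Committing ... .tmp/A/... as .../A/..." DEBUG lines record.
        if (!fs.rename(tmpFile, dst)) {
            throw new IOException("failed to commit " + tmpFile + " to " + dst);
        }
        return dst;
    }
}
```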
2024-11-20T19:25:49,660 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/ebc606cd9a904f268d193a1a936313fe, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/e3bd698e5c35478ab71a5455c3015426, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/5777e0148be04c01a9725d939063b8d2] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp, totalSize=91.4 K 2024-11-20T19:25:49,660 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:49,660 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. files: [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/ebc606cd9a904f268d193a1a936313fe, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/e3bd698e5c35478ab71a5455c3015426, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/5777e0148be04c01a9725d939063b8d2] 2024-11-20T19:25:49,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,660 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:49,661 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 550c6d15b8cc28d8b0f43501c9366c37/B is initiating minor compaction (all files) 2024-11-20T19:25:49,661 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 550c6d15b8cc28d8b0f43501c9366c37/B in TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
2024-11-20T19:25:49,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,661 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/9bfdfd35d3034e56981f19eb967c8d20, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/69e78ab4ed2f4f14bf5f47306c1212fc, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/a88f3f3fd572402fb2360eacac1afdf0] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp, totalSize=35.9 K 2024-11-20T19:25:49,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,662 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting ebc606cd9a904f268d193a1a936313fe, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732130746351 2024-11-20T19:25:49,662 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 9bfdfd35d3034e56981f19eb967c8d20, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732130746351 2024-11-20T19:25:49,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,662 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting e3bd698e5c35478ab71a5455c3015426, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1732130747013 2024-11-20T19:25:49,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,663 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 69e78ab4ed2f4f14bf5f47306c1212fc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1732130747013 2024-11-20T19:25:49,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
550c6d15b8cc28d8b0f43501c9366c37:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:49,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,663 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:49,663 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 550c6d15b8cc28d8b0f43501c9366c37:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:49,663 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:49,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,666 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5777e0148be04c01a9725d939063b8d2, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732130748567 2024-11-20T19:25:49,666 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting a88f3f3fd572402fb2360eacac1afdf0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732130748567 2024-11-20T19:25:49,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
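Editor's note: both selections end up taking all three eligible files ("after considering 1 permutations with 1 in ratio"). The exploring policy's "in ratio" test accepts a candidate set only when no file is larger than the combined size of the other files times the compaction ratio (hbase.hstore.compaction.ratio, 1.2 by default), which three similarly sized files -- roughly 30 K each for A, roughly 12 K each for B -- trivially satisfy, so the single permutation covering all of them wins. The self-contained illustration below shows only that arithmetic; names are illustrative, not the actual ExploringCompactionPolicy code, and the byte counts are an assumed split of the logged 93623-byte total.

```java
// Sketch of the "in ratio" test the ExploringCompactionPolicy lines above refer to.
import java.util.List;

final class RatioCheck {
    /** True if every file is <= ratio * (total size of the other files). */
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        if (fileSizes.size() < 2) {
            return true;
        }
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Assumed split of the logged 93623-byte A-store selection (~30.7 K + ~30.4 K + ~30.4 K).
        List<Long> storeA = List.of(31_437L, 31_093L, 31_093L);
        System.out.println(filesInRatio(storeA, 1.2)); // true -> all three files compact together
    }
}
```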
2024-11-20T19:25:49,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T19:25:49,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T19:25:49,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T19:25:49,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T19:25:49,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T19:25:49,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T19:25:49,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T19:25:49,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,705 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:49,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,708 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 550c6d15b8cc28d8b0f43501c9366c37#B#compaction#139 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:49,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,709 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/580309ddd5eb44919831685163871098 is 50, key is test_row_0/B:col10/1732130748567/Put/seqid=0 2024-11-20T19:25:49,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,718 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411209f3c803416f1427bb7a13aab11009e61_550c6d15b8cc28d8b0f43501c9366c37 store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:49,720 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411209f3c803416f1427bb7a13aab11009e61_550c6d15b8cc28d8b0f43501c9366c37, store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:49,720 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209f3c803416f1427bb7a13aab11009e61_550c6d15b8cc28d8b0f43501c9366c37 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:49,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,726 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,733 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,743 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T19:25:49,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:49,744 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 550c6d15b8cc28d8b0f43501c9366c37 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:25:49,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=A 2024-11-20T19:25:49,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:49,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=B 2024-11-20T19:25:49,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:49,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=C 2024-11-20T19:25:49,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:49,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741992_1168 (size=12561) 2024-11-20T19:25:49,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,756 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/580309ddd5eb44919831685163871098 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/580309ddd5eb44919831685163871098 2024-11-20T19:25:49,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,762 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:49,763 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T19:25:49,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
2024-11-20T19:25:49,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:49,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:49,763 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:49,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:49,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T19:25:49,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,779 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 550c6d15b8cc28d8b0f43501c9366c37/B of 550c6d15b8cc28d8b0f43501c9366c37 into 580309ddd5eb44919831685163871098(size=12.3 
K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:49,779 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:49,779 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., storeName=550c6d15b8cc28d8b0f43501c9366c37/B, priority=13, startTime=1732130749657; duration=0sec 2024-11-20T19:25:49,780 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:49,780 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 550c6d15b8cc28d8b0f43501c9366c37:B 2024-11-20T19:25:49,780 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:49,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,781 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:49,781 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 550c6d15b8cc28d8b0f43501c9366c37/C is initiating minor compaction (all files) 2024-11-20T19:25:49,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,781 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 550c6d15b8cc28d8b0f43501c9366c37/C in TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
2024-11-20T19:25:49,782 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/971424db5efc4aa6b7a2c8e7b733ebcd, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/137da047d3634fa291acd79f279c8c7f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/222d9820455442f9951573bf6aba7b0f] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp, totalSize=35.9 K 2024-11-20T19:25:49,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,783 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 971424db5efc4aa6b7a2c8e7b733ebcd, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732130746351 2024-11-20T19:25:49,784 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 137da047d3634fa291acd79f279c8c7f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1732130747013 2024-11-20T19:25:49,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,784 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 222d9820455442f9951573bf6aba7b0f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732130748567 2024-11-20T19:25:49,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,823 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f3ab5066dff84f5b8ff0a50d208865f8_550c6d15b8cc28d8b0f43501c9366c37 is 50, key is test_row_0/A:col10/1732130749743/Put/seqid=0 2024-11-20T19:25:49,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741993_1169 (size=4469) 2024-11-20T19:25:49,848 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 550c6d15b8cc28d8b0f43501c9366c37#C#compaction#141 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:49,849 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/9b03773caa674adea8c4e584d6e2a6dd is 50, key is test_row_0/C:col10/1732130748567/Put/seqid=0 2024-11-20T19:25:49,886 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:49,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130809872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:49,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741994_1170 (size=19774) 2024-11-20T19:25:49,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:49,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130809874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:49,892 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:49,892 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:49,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130809875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:49,892 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:49,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130809876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:49,902 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f3ab5066dff84f5b8ff0a50d208865f8_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f3ab5066dff84f5b8ff0a50d208865f8_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:49,904 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/97d5398e0f5b4cc3a479d2ad0238a65d, store: [table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:49,905 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/97d5398e0f5b4cc3a479d2ad0238a65d is 175, key is test_row_0/A:col10/1732130749743/Put/seqid=0 2024-11-20T19:25:49,917 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:49,917 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T19:25:49,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:49,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:49,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
2024-11-20T19:25:49,918 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741995_1171 (size=12561) 2024-11-20T19:25:49,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,928 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/9b03773caa674adea8c4e584d6e2a6dd as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/9b03773caa674adea8c4e584d6e2a6dd 2024-11-20T19:25:49,941 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 550c6d15b8cc28d8b0f43501c9366c37/C of 550c6d15b8cc28d8b0f43501c9366c37 into 9b03773caa674adea8c4e584d6e2a6dd(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:49,941 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:49,941 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., storeName=550c6d15b8cc28d8b0f43501c9366c37/C, priority=13, startTime=1732130749663; duration=0sec 2024-11-20T19:25:49,941 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:49,941 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 550c6d15b8cc28d8b0f43501c9366c37:C 2024-11-20T19:25:49,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741996_1172 (size=57033) 2024-11-20T19:25:49,946 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=211, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/97d5398e0f5b4cc3a479d2ad0238a65d 2024-11-20T19:25:49,955 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/fb76522c7d3c403280037f2d0de407e3 is 50, key is test_row_0/B:col10/1732130749743/Put/seqid=0 2024-11-20T19:25:49,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741997_1173 (size=12151) 2024-11-20T19:25:49,997 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/fb76522c7d3c403280037f2d0de407e3 2024-11-20T19:25:50,000 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130809989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:50,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130809993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:50,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130809993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:50,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130809994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:50,008 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/03f15fe528794fb5995507df463ea744 is 50, key is test_row_0/C:col10/1732130749743/Put/seqid=0 2024-11-20T19:25:50,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741998_1174 (size=12151) 2024-11-20T19:25:50,042 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/03f15fe528794fb5995507df463ea744 2024-11-20T19:25:50,054 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/97d5398e0f5b4cc3a479d2ad0238a65d as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/97d5398e0f5b4cc3a479d2ad0238a65d 2024-11-20T19:25:50,059 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/97d5398e0f5b4cc3a479d2ad0238a65d, entries=300, sequenceid=211, filesize=55.7 K 2024-11-20T19:25:50,061 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/fb76522c7d3c403280037f2d0de407e3 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/fb76522c7d3c403280037f2d0de407e3 2024-11-20T19:25:50,070 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:50,071 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T19:25:50,071 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:50,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:50,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:50,071 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:50,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:50,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:50,082 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/fb76522c7d3c403280037f2d0de407e3, entries=150, sequenceid=211, filesize=11.9 K 2024-11-20T19:25:50,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/03f15fe528794fb5995507df463ea744 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/03f15fe528794fb5995507df463ea744 2024-11-20T19:25:50,088 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/03f15fe528794fb5995507df463ea744, entries=150, sequenceid=211, filesize=11.9 K 2024-11-20T19:25:50,089 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 550c6d15b8cc28d8b0f43501c9366c37 in 345ms, sequenceid=211, compaction requested=false 2024-11-20T19:25:50,090 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:50,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:50,208 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 550c6d15b8cc28d8b0f43501c9366c37 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-20T19:25:50,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=A 2024-11-20T19:25:50,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:50,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=B 2024-11-20T19:25:50,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:50,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=C 2024-11-20T19:25:50,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:50,224 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:50,224 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T19:25:50,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
2024-11-20T19:25:50,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:50,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:50,225 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:50,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:50,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:50,226 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112002e2e2c7d63141d08e8109148b81d09d_550c6d15b8cc28d8b0f43501c9366c37 is 50, key is test_row_0/A:col10/1732130750206/Put/seqid=0 2024-11-20T19:25:50,231 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130810225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:50,232 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130810226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:50,232 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130810227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:50,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T19:25:50,247 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130810228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:50,249 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 550c6d15b8cc28d8b0f43501c9366c37#A#compaction#138 average throughput is 0.04 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:50,250 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/7fac6ec9d6b94c7d8537a85b6f75824b is 175, key is test_row_0/A:col10/1732130748567/Put/seqid=0 2024-11-20T19:25:50,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741999_1175 (size=12304) 2024-11-20T19:25:50,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742000_1176 (size=31515) 2024-11-20T19:25:50,328 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/7fac6ec9d6b94c7d8537a85b6f75824b as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/7fac6ec9d6b94c7d8537a85b6f75824b 2024-11-20T19:25:50,337 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 550c6d15b8cc28d8b0f43501c9366c37/A of 550c6d15b8cc28d8b0f43501c9366c37 into 7fac6ec9d6b94c7d8537a85b6f75824b(size=30.8 K), total size for store is 86.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:50,337 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:50,337 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., storeName=550c6d15b8cc28d8b0f43501c9366c37/A, priority=13, startTime=1732130749657; duration=0sec 2024-11-20T19:25:50,338 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:50,338 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 550c6d15b8cc28d8b0f43501c9366c37:A 2024-11-20T19:25:50,341 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130810335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:50,341 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130810335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:50,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130810335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:50,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130810350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:50,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130810370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:50,378 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:50,379 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T19:25:50,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:50,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:50,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:50,379 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:50,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:50,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:50,531 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:50,531 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T19:25:50,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:50,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:50,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:50,532 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:50,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:50,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:50,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130810544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:50,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130810550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:50,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130810550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:50,562 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130810557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:50,684 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:50,693 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T19:25:50,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:50,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:50,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:50,694 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:50,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:50,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:50,702 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:50,707 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112002e2e2c7d63141d08e8109148b81d09d_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112002e2e2c7d63141d08e8109148b81d09d_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:50,709 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/cf1b65dea18d41de8fbd39bc8baf9ae4, store: [table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:50,709 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/cf1b65dea18d41de8fbd39bc8baf9ae4 is 175, key is test_row_0/A:col10/1732130750206/Put/seqid=0 2024-11-20T19:25:50,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742001_1177 (size=31105) 2024-11-20T19:25:50,740 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=241, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/cf1b65dea18d41de8fbd39bc8baf9ae4 2024-11-20T19:25:50,750 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/6dd0efb953a8482cb989e98e5a256217 is 50, key is test_row_0/B:col10/1732130750206/Put/seqid=0 2024-11-20T19:25:50,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742002_1178 (size=12151) 2024-11-20T19:25:50,793 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=241 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/6dd0efb953a8482cb989e98e5a256217 2024-11-20T19:25:50,812 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/bf3a2c948e4c40b2a61679eeeecabcd9 is 50, key is test_row_0/C:col10/1732130750206/Put/seqid=0 
2024-11-20T19:25:50,846 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:50,847 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T19:25:50,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:50,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:50,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:50,847 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:50,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:50,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:50,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130810850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:50,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130810854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:50,859 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130810854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:50,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130810866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:50,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742003_1179 (size=12151) 2024-11-20T19:25:50,874 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=241 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/bf3a2c948e4c40b2a61679eeeecabcd9 2024-11-20T19:25:50,883 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/cf1b65dea18d41de8fbd39bc8baf9ae4 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/cf1b65dea18d41de8fbd39bc8baf9ae4 2024-11-20T19:25:50,889 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/cf1b65dea18d41de8fbd39bc8baf9ae4, entries=150, sequenceid=241, filesize=30.4 K 2024-11-20T19:25:50,891 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/6dd0efb953a8482cb989e98e5a256217 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/6dd0efb953a8482cb989e98e5a256217 2024-11-20T19:25:50,897 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/6dd0efb953a8482cb989e98e5a256217, entries=150, sequenceid=241, filesize=11.9 K 2024-11-20T19:25:50,899 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/bf3a2c948e4c40b2a61679eeeecabcd9 as 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/bf3a2c948e4c40b2a61679eeeecabcd9 2024-11-20T19:25:50,905 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/bf3a2c948e4c40b2a61679eeeecabcd9, entries=150, sequenceid=241, filesize=11.9 K 2024-11-20T19:25:50,907 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 550c6d15b8cc28d8b0f43501c9366c37 in 698ms, sequenceid=241, compaction requested=true 2024-11-20T19:25:50,907 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:50,907 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:50,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 550c6d15b8cc28d8b0f43501c9366c37:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:50,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:50,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 550c6d15b8cc28d8b0f43501c9366c37:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:50,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:50,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 550c6d15b8cc28d8b0f43501c9366c37:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:50,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T19:25:50,908 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:50,909 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 119653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:50,909 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 550c6d15b8cc28d8b0f43501c9366c37/A is initiating minor compaction (all files) 2024-11-20T19:25:50,909 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 550c6d15b8cc28d8b0f43501c9366c37/A in TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
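Note on the records above: the flush at sequenceid=241 has completed, the flusher has queued compaction checks for stores A, B and C, and ExploringCompactionPolicy then selects all three store files of each store for a minor compaction (writes would block at 16 store files). The selection is governed by the stock store-compaction settings; the following is a minimal sketch of those knobs on a test Configuration, where the property keys are the standard HBase ones but the numeric values are illustrative assumptions, not values read from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionTuning {
    public static void main(String[] args) {
        // Illustrative values only; the stock HBase property names are used,
        // but the numbers are not taken from the test log above.
        Configuration conf = HBaseConfiguration.create();
        // Minimum/maximum number of store files considered per compaction request.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // A flush enqueues a compaction check once this many store files exist.
        conf.setInt("hbase.hstore.compactionThreshold", 3);
        // Ratio used by ExploringCompactionPolicy when weighing candidate sets.
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        // Writes block once a store accumulates this many files (the "16 blocking" above).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
    }
}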
2024-11-20T19:25:50,910 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/7fac6ec9d6b94c7d8537a85b6f75824b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/97d5398e0f5b4cc3a479d2ad0238a65d, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/cf1b65dea18d41de8fbd39bc8baf9ae4] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp, totalSize=116.8 K 2024-11-20T19:25:50,910 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:50,910 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. files: [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/7fac6ec9d6b94c7d8537a85b6f75824b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/97d5398e0f5b4cc3a479d2ad0238a65d, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/cf1b65dea18d41de8fbd39bc8baf9ae4] 2024-11-20T19:25:50,910 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:50,910 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 550c6d15b8cc28d8b0f43501c9366c37/B is initiating minor compaction (all files) 2024-11-20T19:25:50,910 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 550c6d15b8cc28d8b0f43501c9366c37/B in TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
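Note on the records above: store A is compacted through DefaultMobStoreCompactor, which means family A is declared MOB-enabled in this test table. The following is a minimal sketch of declaring such a family with the HBase 2.x descriptor builders; the table and family names match the test, while the 100 KB MOB threshold is an illustrative assumption.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
    public static void main(String[] args) {
        // Cells larger than the threshold are written to MOB files instead of
        // regular store files; small cells (as in this run) stay in the store,
        // which is why the MOB writer above is aborted with "no MOB cells".
        ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)
            .setMobThreshold(100 * 1024L) // illustrative threshold
            .build();
        TableDescriptor table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(mobFamily)
            .build();
        System.out.println(table);
    }
}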
2024-11-20T19:25:50,910 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/580309ddd5eb44919831685163871098, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/fb76522c7d3c403280037f2d0de407e3, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/6dd0efb953a8482cb989e98e5a256217] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp, totalSize=36.0 K 2024-11-20T19:25:50,911 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7fac6ec9d6b94c7d8537a85b6f75824b, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732130748567 2024-11-20T19:25:50,911 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 580309ddd5eb44919831685163871098, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732130748567 2024-11-20T19:25:50,911 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting fb76522c7d3c403280037f2d0de407e3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732130748584 2024-11-20T19:25:50,911 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 97d5398e0f5b4cc3a479d2ad0238a65d, keycount=300, bloomtype=ROW, size=55.7 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732130748584 2024-11-20T19:25:50,912 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 6dd0efb953a8482cb989e98e5a256217, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1732130749873 2024-11-20T19:25:50,912 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf1b65dea18d41de8fbd39bc8baf9ae4, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1732130749873 2024-11-20T19:25:50,938 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:50,951 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 550c6d15b8cc28d8b0f43501c9366c37#B#compaction#148 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:50,952 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/d25c244a576e495bbeb1c5b90f039e09 is 50, key is test_row_0/B:col10/1732130750206/Put/seqid=0 2024-11-20T19:25:50,953 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120d325ea1da1274237b2da60725a89a9a7_550c6d15b8cc28d8b0f43501c9366c37 store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:50,955 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120d325ea1da1274237b2da60725a89a9a7_550c6d15b8cc28d8b0f43501c9366c37, store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:50,956 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d325ea1da1274237b2da60725a89a9a7_550c6d15b8cc28d8b0f43501c9366c37 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:50,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742004_1180 (size=12663) 2024-11-20T19:25:51,000 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:51,001 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T19:25:51,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
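Note on the records above: the pid=56 entries show the master's FlushTableProcedure dispatching a FlushRegionCallable to this region server. The same flush can be requested from a client through the Admin API; a minimal sketch follows, using only standard connection boilerplate rather than anything taken from the test harness itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to run a flush procedure, which fans out
            // FlushRegionCallable work to each region server (as in the log above).
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}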
2024-11-20T19:25:51,002 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 550c6d15b8cc28d8b0f43501c9366c37 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-20T19:25:51,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=A 2024-11-20T19:25:51,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:51,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=B 2024-11-20T19:25:51,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:51,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=C 2024-11-20T19:25:51,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:51,013 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/d25c244a576e495bbeb1c5b90f039e09 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/d25c244a576e495bbeb1c5b90f039e09 2024-11-20T19:25:51,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742005_1181 (size=4469) 2024-11-20T19:25:51,024 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 550c6d15b8cc28d8b0f43501c9366c37/B of 550c6d15b8cc28d8b0f43501c9366c37 into d25c244a576e495bbeb1c5b90f039e09(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
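Note on the records above: the "average throughput ... total limit is 50.00 MB/second" lines come from the pressure-aware compaction throughput controller, which throttles compaction I/O between a lower and an upper bound depending on store-file pressure. The sketch below shows the related settings; the property keys are assumed to be the stock compaction-throughput keys and the byte values are illustrative, not read from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputExample {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed stock property keys for the pressure-aware controller. With no
        // flush/compaction pressure the effective limit sits at the lower bound,
        // which would match the log's "total limit is 50.00 MB/second".
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        System.out.println("lower bound = "
            + conf.getLong("hbase.hstore.compaction.throughput.lower.bound", 0) + " bytes/s");
    }
}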
2024-11-20T19:25:51,024 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:51,024 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., storeName=550c6d15b8cc28d8b0f43501c9366c37/B, priority=13, startTime=1732130750908; duration=0sec 2024-11-20T19:25:51,024 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:51,025 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 550c6d15b8cc28d8b0f43501c9366c37:B 2024-11-20T19:25:51,025 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:51,027 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 550c6d15b8cc28d8b0f43501c9366c37#A#compaction#147 average throughput is 0.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:51,027 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/27b186765d5b41918071fe0d72db837c is 175, key is test_row_0/A:col10/1732130750206/Put/seqid=0 2024-11-20T19:25:51,030 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:51,030 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 550c6d15b8cc28d8b0f43501c9366c37/C is initiating minor compaction (all files) 2024-11-20T19:25:51,031 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 550c6d15b8cc28d8b0f43501c9366c37/C in TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
2024-11-20T19:25:51,031 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/9b03773caa674adea8c4e584d6e2a6dd, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/03f15fe528794fb5995507df463ea744, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/bf3a2c948e4c40b2a61679eeeecabcd9] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp, totalSize=36.0 K 2024-11-20T19:25:51,031 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 9b03773caa674adea8c4e584d6e2a6dd, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732130748567 2024-11-20T19:25:51,032 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 03f15fe528794fb5995507df463ea744, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732130748584 2024-11-20T19:25:51,032 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting bf3a2c948e4c40b2a61679eeeecabcd9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1732130749873 2024-11-20T19:25:51,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b53df4d6ea1c4777b545f347ed53cc5e_550c6d15b8cc28d8b0f43501c9366c37 is 50, key is test_row_0/A:col10/1732130750224/Put/seqid=0 2024-11-20T19:25:51,059 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 550c6d15b8cc28d8b0f43501c9366c37#C#compaction#150 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:51,060 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/049fd35e086d474ca0ee1be475faafa9 is 50, key is test_row_0/C:col10/1732130750206/Put/seqid=0 2024-11-20T19:25:51,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742006_1182 (size=31617) 2024-11-20T19:25:51,108 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/27b186765d5b41918071fe0d72db837c as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/27b186765d5b41918071fe0d72db837c 2024-11-20T19:25:51,114 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 550c6d15b8cc28d8b0f43501c9366c37/A of 550c6d15b8cc28d8b0f43501c9366c37 into 27b186765d5b41918071fe0d72db837c(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:51,114 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:51,115 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., storeName=550c6d15b8cc28d8b0f43501c9366c37/A, priority=13, startTime=1732130750907; duration=0sec 2024-11-20T19:25:51,115 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:51,115 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 550c6d15b8cc28d8b0f43501c9366c37:A 2024-11-20T19:25:51,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742007_1183 (size=12304) 2024-11-20T19:25:51,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742008_1184 (size=12663) 2024-11-20T19:25:51,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T19:25:51,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:51,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:51,451 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130811439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:51,452 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130811440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:51,455 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130811451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:51,456 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130811451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:51,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:51,536 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b53df4d6ea1c4777b545f347ed53cc5e_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b53df4d6ea1c4777b545f347ed53cc5e_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:51,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/0451408ccb41448d8ab0ac03e308672e, store: [table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:51,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/0451408ccb41448d8ab0ac03e308672e is 175, key is test_row_0/A:col10/1732130750224/Put/seqid=0 2024-11-20T19:25:51,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130811553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:51,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130811554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:51,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130811556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:51,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130811557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:51,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742009_1185 (size=31105) 2024-11-20T19:25:51,603 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=251, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/0451408ccb41448d8ab0ac03e308672e 2024-11-20T19:25:51,603 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/049fd35e086d474ca0ee1be475faafa9 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/049fd35e086d474ca0ee1be475faafa9 2024-11-20T19:25:51,623 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 550c6d15b8cc28d8b0f43501c9366c37/C of 550c6d15b8cc28d8b0f43501c9366c37 into 049fd35e086d474ca0ee1be475faafa9(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
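Note on the records above and below: throughout this window the server rejects Mutate calls with RegionTooBusyException because the region's memstore is over its blocking limit. The stock HBase client retries these within its configured retry budget, but a caller can also back off explicitly. The following is a minimal retry sketch; the row, family and qualifier mirror the test's test_row_0/A:col10 keys, the retry cap and backoff are illustrative, and depending on client retry settings the busy signal may arrive wrapped in a RetriesExhaustedException rather than directly as shown here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    break; // accepted once the memstore drains below the blocking limit
                } catch (RegionTooBusyException busy) {
                    if (attempt >= 5) {
                        throw busy; // give up after a few attempts (illustrative cap)
                    }
                    Thread.sleep(backoffMs);
                    backoffMs *= 2; // simple exponential backoff while flushes catch up
                }
            }
        }
    }
}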
2024-11-20T19:25:51,623 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:51,623 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., storeName=550c6d15b8cc28d8b0f43501c9366c37/C, priority=13, startTime=1732130750908; duration=0sec 2024-11-20T19:25:51,623 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:51,623 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 550c6d15b8cc28d8b0f43501c9366c37:C 2024-11-20T19:25:51,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/894be7a2b8da473da2622bcc0ba15d71 is 50, key is test_row_0/B:col10/1732130750224/Put/seqid=0 2024-11-20T19:25:51,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742010_1186 (size=12151) 2024-11-20T19:25:51,681 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/894be7a2b8da473da2622bcc0ba15d71 2024-11-20T19:25:51,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/e613d47a44bb4378bd5333d5ed846fee is 50, key is test_row_0/C:col10/1732130750224/Put/seqid=0 2024-11-20T19:25:51,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742011_1187 (size=12151) 2024-11-20T19:25:51,744 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/e613d47a44bb4378bd5333d5ed846fee 2024-11-20T19:25:51,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/0451408ccb41448d8ab0ac03e308672e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/0451408ccb41448d8ab0ac03e308672e 2024-11-20T19:25:51,762 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130811759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:51,763 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/0451408ccb41448d8ab0ac03e308672e, entries=150, sequenceid=251, filesize=30.4 K 2024-11-20T19:25:51,763 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130811761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:51,764 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130811762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:51,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/894be7a2b8da473da2622bcc0ba15d71 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/894be7a2b8da473da2622bcc0ba15d71 2024-11-20T19:25:51,773 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/894be7a2b8da473da2622bcc0ba15d71, entries=150, sequenceid=251, filesize=11.9 K 2024-11-20T19:25:51,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130811764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:51,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/e613d47a44bb4378bd5333d5ed846fee as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/e613d47a44bb4378bd5333d5ed846fee 2024-11-20T19:25:51,789 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/e613d47a44bb4378bd5333d5ed846fee, entries=150, sequenceid=251, filesize=11.9 K 2024-11-20T19:25:51,790 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=167.72 KB/171750 for 550c6d15b8cc28d8b0f43501c9366c37 in 789ms, sequenceid=251, compaction requested=false 2024-11-20T19:25:51,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:51,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
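Note on the "Over memstore limit=512.0 K" rejections: HRegion.checkResources() blocks writes once a region's memstore exceeds the flush size multiplied by the block multiplier, and this test evidently runs with a very small flush size so that the 512 K limit is hit quickly. The sketch below shows the two properties involved; the property keys are the stock ones, while the 128 KB x 4 figures are only an illustration that reproduces a 512 K blocking limit, not values read from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitExample {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative numbers: 128 KB flush size x 4 multiplier = 512 KB blocking
        // limit, matching the log's "Over memstore limit=512.0 K". Production
        // clusters typically use a flush size of 128 MB or more.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 0);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        // Puts are rejected with RegionTooBusyException once the memstore passes this size.
        System.out.println("blocking memstore size = " + (flushSize * multiplier) + " bytes");
    }
}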
2024-11-20T19:25:51,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-11-20T19:25:51,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-11-20T19:25:51,793 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-11-20T19:25:51,794 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6430 sec 2024-11-20T19:25:51,795 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 2.6580 sec 2024-11-20T19:25:52,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:52,070 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 550c6d15b8cc28d8b0f43501c9366c37 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-20T19:25:52,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=A 2024-11-20T19:25:52,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:52,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=B 2024-11-20T19:25:52,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:52,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=C 2024-11-20T19:25:52,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:52,077 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130812074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:52,087 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130812076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:52,087 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130812077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:52,088 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130812077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:52,104 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e552cefd0f5043718e350266f94fb4d8_550c6d15b8cc28d8b0f43501c9366c37 is 50, key is test_row_0/A:col10/1732130752069/Put/seqid=0 2024-11-20T19:25:52,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742012_1188 (size=17534) 2024-11-20T19:25:52,140 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:52,145 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e552cefd0f5043718e350266f94fb4d8_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e552cefd0f5043718e350266f94fb4d8_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:52,146 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/bb3b81c9d3364ba7b61efbdaafbf86f7, store: [table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:52,147 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/bb3b81c9d3364ba7b61efbdaafbf86f7 is 175, key is test_row_0/A:col10/1732130752069/Put/seqid=0 2024-11-20T19:25:52,185 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130812179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:52,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742013_1189 (size=48639) 2024-11-20T19:25:52,193 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130812189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:52,193 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130812190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:52,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130812387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:52,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130812388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:52,396 DEBUG [Thread-600 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4166 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., hostname=db9c3a6c6492,41229,1732130701496, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:25:52,396 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130812396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:52,403 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130812397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:52,590 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=284, memsize=60.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/bb3b81c9d3364ba7b61efbdaafbf86f7 2024-11-20T19:25:52,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130812591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:52,602 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/995033e7637c422ebfa776bb7db9f333 is 50, key is test_row_0/B:col10/1732130752069/Put/seqid=0 2024-11-20T19:25:52,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742014_1190 (size=12301) 2024-11-20T19:25:52,643 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/995033e7637c422ebfa776bb7db9f333 2024-11-20T19:25:52,662 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/964b38e2a7ef4298a546aebaffbbb377 is 50, key is test_row_0/C:col10/1732130752069/Put/seqid=0 2024-11-20T19:25:52,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742015_1191 (size=12301) 2024-11-20T19:25:52,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130812692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:52,695 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/964b38e2a7ef4298a546aebaffbbb377 2024-11-20T19:25:52,701 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130812698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:52,708 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/bb3b81c9d3364ba7b61efbdaafbf86f7 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/bb3b81c9d3364ba7b61efbdaafbf86f7 2024-11-20T19:25:52,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130812707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:52,716 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/bb3b81c9d3364ba7b61efbdaafbf86f7, entries=250, sequenceid=284, filesize=47.5 K 2024-11-20T19:25:52,719 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/995033e7637c422ebfa776bb7db9f333 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/995033e7637c422ebfa776bb7db9f333 2024-11-20T19:25:52,731 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/995033e7637c422ebfa776bb7db9f333, entries=150, sequenceid=284, filesize=12.0 K 2024-11-20T19:25:52,732 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/964b38e2a7ef4298a546aebaffbbb377 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/964b38e2a7ef4298a546aebaffbbb377 2024-11-20T19:25:52,738 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/964b38e2a7ef4298a546aebaffbbb377, entries=150, sequenceid=284, filesize=12.0 K 2024-11-20T19:25:52,740 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for 550c6d15b8cc28d8b0f43501c9366c37 in 669ms, sequenceid=284, compaction requested=true 2024-11-20T19:25:52,740 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:52,741 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:52,742 DEBUG 
[RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111361 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:52,742 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 550c6d15b8cc28d8b0f43501c9366c37/A is initiating minor compaction (all files) 2024-11-20T19:25:52,743 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 550c6d15b8cc28d8b0f43501c9366c37/A in TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:52,743 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/27b186765d5b41918071fe0d72db837c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/0451408ccb41448d8ab0ac03e308672e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/bb3b81c9d3364ba7b61efbdaafbf86f7] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp, totalSize=108.8 K 2024-11-20T19:25:52,743 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:52,743 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
files: [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/27b186765d5b41918071fe0d72db837c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/0451408ccb41448d8ab0ac03e308672e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/bb3b81c9d3364ba7b61efbdaafbf86f7] 2024-11-20T19:25:52,743 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 27b186765d5b41918071fe0d72db837c, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1732130749873 2024-11-20T19:25:52,744 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0451408ccb41448d8ab0ac03e308672e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732130750223 2024-11-20T19:25:52,744 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting bb3b81c9d3364ba7b61efbdaafbf86f7, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732130751438 2024-11-20T19:25:52,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 550c6d15b8cc28d8b0f43501c9366c37:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:52,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:52,746 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:52,747 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:52,748 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 550c6d15b8cc28d8b0f43501c9366c37/B is initiating minor compaction (all files) 2024-11-20T19:25:52,748 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 550c6d15b8cc28d8b0f43501c9366c37/B in TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
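The compaction entries above show ExploringCompactionPolicy selecting all three eligible A-family HFiles (about 108.8 K total) for a minor compaction once successive flushes accumulate, with 16 blocking store files as the back-pressure ceiling. Below is a hedged sketch of the knobs involved and of requesting the same compaction by hand through the Admin API; the configuration keys are standard HBase settings, the table and family names come from the log, and the rest is illustrative rather than the test's code.

// Hypothetical sketch only: trigger a compaction of the A family via the Admin API.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactionNudge {
  public static void main(String[] args) throws Exception {
    // Server-side knobs behind the selection above (set in hbase-site.xml on the
    // region servers, listed here only for reference):
    //   hbase.hstore.compactionThreshold - store files needed before a minor compaction (3 here)
    //   hbase.hstore.blockingStoreFiles  - store file count at which writes block (16 in the log)
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the region server to compact just the A family of the test table.
      admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("A"));
    }
  }
}

In the run itself no manual request is needed: the flusher marks the stores for compaction and the short/long compaction threads pick them up, as the CompactSplit entries show.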
2024-11-20T19:25:52,748 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/d25c244a576e495bbeb1c5b90f039e09, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/894be7a2b8da473da2622bcc0ba15d71, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/995033e7637c422ebfa776bb7db9f333] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp, totalSize=36.2 K 2024-11-20T19:25:52,748 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting d25c244a576e495bbeb1c5b90f039e09, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1732130749873 2024-11-20T19:25:52,749 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 894be7a2b8da473da2622bcc0ba15d71, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732130750223 2024-11-20T19:25:52,750 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 995033e7637c422ebfa776bb7db9f333, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732130751442 2024-11-20T19:25:52,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 550c6d15b8cc28d8b0f43501c9366c37:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:52,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:52,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 550c6d15b8cc28d8b0f43501c9366c37:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:52,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:52,767 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:52,782 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 550c6d15b8cc28d8b0f43501c9366c37#B#compaction#157 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:52,783 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/8dbb37167a5d44cd9a8ed4863352d939 is 50, key is test_row_0/B:col10/1732130752069/Put/seqid=0 2024-11-20T19:25:52,799 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411202ff8573927e444d2a6624ee6707a89cf_550c6d15b8cc28d8b0f43501c9366c37 store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:52,801 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411202ff8573927e444d2a6624ee6707a89cf_550c6d15b8cc28d8b0f43501c9366c37, store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:52,801 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202ff8573927e444d2a6624ee6707a89cf_550c6d15b8cc28d8b0f43501c9366c37 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:52,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742016_1192 (size=12915) 2024-11-20T19:25:52,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742017_1193 (size=4469) 2024-11-20T19:25:52,842 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/8dbb37167a5d44cd9a8ed4863352d939 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/8dbb37167a5d44cd9a8ed4863352d939 2024-11-20T19:25:52,850 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 550c6d15b8cc28d8b0f43501c9366c37/B of 550c6d15b8cc28d8b0f43501c9366c37 into 8dbb37167a5d44cd9a8ed4863352d939(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
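The A-family work above goes through DefaultMobStoreCompactor and the earlier flush wrote into the mobdir, while every store file in the selections carries bloomtype=ROW. As a sketch under stated assumptions, a table descriptor that would produce a similar layout looks like the following: family A MOB-enabled with ROW bloom filters, B and C as ordinary stores. The MOB threshold value is assumed, not read from the log, and this is not the schema the test actually creates.

// Hypothetical sketch only: a TestAcidGuarantees-like table with a MOB-enabled A family.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      // Family A: MOB-enabled (large cells land under the mobdir seen in the log)
      // with ROW bloom filters, matching the "bloomtype=ROW" compaction entries.
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)
          .setMobThreshold(100L)   // assumed threshold, not taken from the log
          .setBloomFilterType(BloomType.ROW)
          .build());
      // Families B and C stay as plain stores in this sketch.
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
      admin.createTable(table.build());
    }
  }
}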
2024-11-20T19:25:52,850 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:52,850 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., storeName=550c6d15b8cc28d8b0f43501c9366c37/B, priority=13, startTime=1732130752746; duration=0sec 2024-11-20T19:25:52,851 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:52,851 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 550c6d15b8cc28d8b0f43501c9366c37:B 2024-11-20T19:25:52,851 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:52,852 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:52,852 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 550c6d15b8cc28d8b0f43501c9366c37/C is initiating minor compaction (all files) 2024-11-20T19:25:52,852 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 550c6d15b8cc28d8b0f43501c9366c37/C in TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:52,852 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/049fd35e086d474ca0ee1be475faafa9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/e613d47a44bb4378bd5333d5ed846fee, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/964b38e2a7ef4298a546aebaffbbb377] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp, totalSize=36.2 K 2024-11-20T19:25:52,853 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 049fd35e086d474ca0ee1be475faafa9, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1732130749873 2024-11-20T19:25:52,853 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting e613d47a44bb4378bd5333d5ed846fee, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732130750223 2024-11-20T19:25:52,854 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 964b38e2a7ef4298a546aebaffbbb377, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732130751442 2024-11-20T19:25:52,891 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
550c6d15b8cc28d8b0f43501c9366c37#C#compaction#158 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:52,892 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/4cd5e74764474d78b4988fb254028dbf is 50, key is test_row_0/C:col10/1732130752069/Put/seqid=0 2024-11-20T19:25:52,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742018_1194 (size=12915) 2024-11-20T19:25:52,944 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/4cd5e74764474d78b4988fb254028dbf as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/4cd5e74764474d78b4988fb254028dbf 2024-11-20T19:25:52,955 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 550c6d15b8cc28d8b0f43501c9366c37/C of 550c6d15b8cc28d8b0f43501c9366c37 into 4cd5e74764474d78b4988fb254028dbf(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:52,955 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:52,955 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., storeName=550c6d15b8cc28d8b0f43501c9366c37/C, priority=13, startTime=1732130752756; duration=0sec 2024-11-20T19:25:52,955 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:52,955 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 550c6d15b8cc28d8b0f43501c9366c37:C 2024-11-20T19:25:53,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:53,220 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 550c6d15b8cc28d8b0f43501c9366c37 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:25:53,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=A 2024-11-20T19:25:53,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:53,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=B 2024-11-20T19:25:53,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:53,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO 
DISK 550c6d15b8cc28d8b0f43501c9366c37, store=C 2024-11-20T19:25:53,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:53,231 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 550c6d15b8cc28d8b0f43501c9366c37#A#compaction#156 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:53,232 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/4b22b0aeeed44cea8305c86a77f1a8fa is 175, key is test_row_0/A:col10/1732130752069/Put/seqid=0 2024-11-20T19:25:53,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T19:25:53,246 INFO [Thread-610 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-11-20T19:25:53,248 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:53,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-11-20T19:25:53,250 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:53,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T19:25:53,250 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:53,250 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:53,265 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112058b3c46461c44b34ba785ede0c1d100b_550c6d15b8cc28d8b0f43501c9366c37 is 50, key is test_row_0/A:col10/1732130753217/Put/seqid=0 2024-11-20T19:25:53,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742019_1195 (size=31869) 2024-11-20T19:25:53,302 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/4b22b0aeeed44cea8305c86a77f1a8fa as 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/4b22b0aeeed44cea8305c86a77f1a8fa 2024-11-20T19:25:53,308 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 550c6d15b8cc28d8b0f43501c9366c37/A of 550c6d15b8cc28d8b0f43501c9366c37 into 4b22b0aeeed44cea8305c86a77f1a8fa(size=31.1 K), total size for store is 31.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:53,308 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:53,309 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., storeName=550c6d15b8cc28d8b0f43501c9366c37/A, priority=13, startTime=1732130752741; duration=0sec 2024-11-20T19:25:53,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742020_1196 (size=14994) 2024-11-20T19:25:53,309 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:53,309 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 550c6d15b8cc28d8b0f43501c9366c37:A 2024-11-20T19:25:53,312 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:53,316 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130813306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:53,317 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112058b3c46461c44b34ba785ede0c1d100b_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112058b3c46461c44b34ba785ede0c1d100b_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:53,318 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/83c032181fef45f592235b592983b92e, store: [table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:53,319 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/83c032181fef45f592235b592983b92e is 175, key is test_row_0/A:col10/1732130753217/Put/seqid=0 2024-11-20T19:25:53,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130813312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:53,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130813318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:53,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T19:25:53,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742021_1197 (size=39949) 2024-11-20T19:25:53,403 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:53,403 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T19:25:53,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:53,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:53,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:53,403 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:53,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:53,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:53,421 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130813419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:53,428 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130813425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:53,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130813434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:53,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T19:25:53,558 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:53,558 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T19:25:53,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:53,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:53,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:53,559 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
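The repeated RegionTooBusyException entries above are the region server pushing back: the region's memstore has grown past its blocking limit (512.0 K in this test) while the flush is still in progress, so new mutations are rejected until the flush drains it. The sketch below is a hypothetical client-side retry loop, shown only to illustrate how such an exception might be handled; the table, row and family names come from the log, while the value, attempt cap and backoff are assumptions, and in practice the HBase client also retries these calls internally.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value")); // placeholder value
      int attempts = 0;
      while (true) {
        try {
          table.put(put);
          break;                                // write accepted
        } catch (RegionTooBusyException e) {    // server refused: memstore over its blocking limit
          if (++attempts >= 5) throw e;         // assumed cap on retries
          Thread.sleep(200L * attempts);        // assumed backoff while the flush drains the memstore
        }
      }
    }
  }
}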
2024-11-20T19:25:53,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:53,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:53,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130813599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:53,629 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130813624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:53,631 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130813630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:53,638 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130813638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:53,711 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:53,711 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T19:25:53,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:53,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:53,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:53,712 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:53,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:53,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:53,763 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=297, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/83c032181fef45f592235b592983b92e 2024-11-20T19:25:53,785 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/375c25c6fc864e2a854f7207944042f6 is 50, key is test_row_0/B:col10/1732130753217/Put/seqid=0 2024-11-20T19:25:53,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742022_1198 (size=12301) 2024-11-20T19:25:53,839 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/375c25c6fc864e2a854f7207944042f6 2024-11-20T19:25:53,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T19:25:53,863 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:53,863 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T19:25:53,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:53,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:53,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
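The pid=57/58 procedure being re-dispatched above corresponds to a client-requested table flush (the "flush TestAcidGuarantees" call logged earlier); it keeps failing with "Unable to complete flush ... as already flushing" because the MemStoreFlusher is still writing out the A/B/C store files, so the master retries it. On the client side such a request amounts to a single Admin call; a minimal sketch, with only the table name taken from the log and everything else assumed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every region of the table; on this build the master
      // drives it as a flush procedure, like pid=57 in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}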
2024-11-20T19:25:53,864 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:53,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:53,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:53,869 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/f70669460e5148ab8a6bba6c8dc2007a is 50, key is test_row_0/C:col10/1732130753217/Put/seqid=0 2024-11-20T19:25:53,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742023_1199 (size=12301) 2024-11-20T19:25:53,902 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/f70669460e5148ab8a6bba6c8dc2007a 2024-11-20T19:25:53,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/83c032181fef45f592235b592983b92e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/83c032181fef45f592235b592983b92e 2024-11-20T19:25:53,920 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/83c032181fef45f592235b592983b92e, entries=200, sequenceid=297, filesize=39.0 K 2024-11-20T19:25:53,922 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/375c25c6fc864e2a854f7207944042f6 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/375c25c6fc864e2a854f7207944042f6 2024-11-20T19:25:53,930 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/375c25c6fc864e2a854f7207944042f6, entries=150, sequenceid=297, filesize=12.0 K 2024-11-20T19:25:53,931 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/f70669460e5148ab8a6bba6c8dc2007a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/f70669460e5148ab8a6bba6c8dc2007a 2024-11-20T19:25:53,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130813932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:53,937 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130813934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:53,942 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/f70669460e5148ab8a6bba6c8dc2007a, entries=150, sequenceid=297, filesize=12.0 K 2024-11-20T19:25:53,943 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 550c6d15b8cc28d8b0f43501c9366c37 in 723ms, sequenceid=297, compaction requested=false 2024-11-20T19:25:53,943 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:53,947 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 550c6d15b8cc28d8b0f43501c9366c37 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T19:25:53,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=A 2024-11-20T19:25:53,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:53,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=B 2024-11-20T19:25:53,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:53,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=C 2024-11-20T19:25:53,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:53,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:53,971 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ad6e4aa66360417197644ebce4df67ba_550c6d15b8cc28d8b0f43501c9366c37 is 50, key is test_row_0/A:col10/1732130753945/Put/seqid=0 2024-11-20T19:25:53,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130813979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:54,016 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:54,017 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T19:25:54,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:54,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:54,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
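Note: the RegionTooBusyException warnings just above come from HRegion.checkResources (visible in the stack traces) rejecting new mutations once the region's memstore passes its blocking limit. That limit is the configured memstore flush size multiplied by hbase.hregion.memstore.block.multiplier, so the "Over memstore limit=512.0 K" reported here points at a deliberately small test configuration. A minimal sketch of how the two settings combine, with illustrative values only (the test's actual overrides are not visible in this log):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class MemStoreBlockingLimit {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // Hypothetical test-style overrides; 128 KB * 4 would match the 512.0 K limit above.
      conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
      conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

      long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
      int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
      // HRegion.checkResources answers with RegionTooBusyException once the region's
      // memstore exceeds flushSize * multiplier.
      System.out.println("blocking memstore size = " + (flushSize * multiplier) + " bytes");
    }
  }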
2024-11-20T19:25:54,017 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:54,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:54,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:54,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742024_1200 (size=14994) 2024-11-20T19:25:54,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:54,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130814084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:54,170 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:54,171 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T19:25:54,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:54,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:54,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:54,171 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:54,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:54,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:54,299 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:54,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130814298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:54,334 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:54,336 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T19:25:54,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:54,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:54,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
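Note: each RegionTooBusyException WARN paired with a CallRunner DEBUG above is the server refusing a Mutate call while the region waits for its flush to free memstore space; the HBase client treats this as a retriable IOException and normally retries on its own, and a writer can add its own backoff loop on top. A minimal, hypothetical sketch of such a loop (table, row, family and qualifier names are taken from the log; the retry count and backoff values are illustrative, not from the test):

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;

  public class RetryingWriter {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
        Put put = new Put(Bytes.toBytes("test_row_0"));
        put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
        long backoffMs = 100;                 // illustrative starting backoff
        for (int attempt = 1; attempt <= 10; attempt++) {
          try {
            table.put(put);                   // may fail while the region is blocked on a flush
            break;                            // write accepted
          } catch (IOException e) {           // e.g. a surfaced RegionTooBusyException
            Thread.sleep(backoffMs);
            backoffMs = Math.min(backoffMs * 2, 5_000);
          }
        }
      }
    }
  }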
2024-11-20T19:25:54,336 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:54,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:54,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:54,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T19:25:54,425 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:54,429 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ad6e4aa66360417197644ebce4df67ba_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ad6e4aa66360417197644ebce4df67ba_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:54,431 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/d1f2bfb0ab9c4e3f962ae4c91d1095d8, store: [table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:54,431 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/d1f2bfb0ab9c4e3f962ae4c91d1095d8 is 175, key is test_row_0/A:col10/1732130753945/Put/seqid=0 2024-11-20T19:25:54,436 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:54,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130814435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:54,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:54,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130814444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:54,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742025_1201 (size=39949) 2024-11-20T19:25:54,456 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=325, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/d1f2bfb0ab9c4e3f962ae4c91d1095d8 2024-11-20T19:25:54,469 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/95bc29caab574e738b966b8f0c31d2bd is 50, key is test_row_0/B:col10/1732130753945/Put/seqid=0 2024-11-20T19:25:54,489 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:54,489 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T19:25:54,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:54,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:54,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:54,490 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:54,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:54,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:54,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742026_1202 (size=12301) 2024-11-20T19:25:54,505 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/95bc29caab574e738b966b8f0c31d2bd 2024-11-20T19:25:54,528 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/62ed94897bab49b481058108b2fce75c is 50, key is test_row_0/C:col10/1732130753945/Put/seqid=0 2024-11-20T19:25:54,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742027_1203 (size=12301) 2024-11-20T19:25:54,567 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/62ed94897bab49b481058108b2fce75c 2024-11-20T19:25:54,582 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/d1f2bfb0ab9c4e3f962ae4c91d1095d8 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/d1f2bfb0ab9c4e3f962ae4c91d1095d8 2024-11-20T19:25:54,588 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/d1f2bfb0ab9c4e3f962ae4c91d1095d8, entries=200, sequenceid=325, filesize=39.0 K 2024-11-20T19:25:54,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/95bc29caab574e738b966b8f0c31d2bd as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/95bc29caab574e738b966b8f0c31d2bd 2024-11-20T19:25:54,593 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/95bc29caab574e738b966b8f0c31d2bd, entries=150, sequenceid=325, filesize=12.0 K 2024-11-20T19:25:54,594 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/62ed94897bab49b481058108b2fce75c as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/62ed94897bab49b481058108b2fce75c 2024-11-20T19:25:54,599 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/62ed94897bab49b481058108b2fce75c, entries=150, sequenceid=325, filesize=12.0 K 2024-11-20T19:25:54,600 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 550c6d15b8cc28d8b0f43501c9366c37 in 654ms, sequenceid=325, compaction requested=true 2024-11-20T19:25:54,600 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:54,600 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:54,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 550c6d15b8cc28d8b0f43501c9366c37:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:54,602 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111767 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:54,602 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 550c6d15b8cc28d8b0f43501c9366c37/A is initiating minor compaction (all files) 2024-11-20T19:25:54,602 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 550c6d15b8cc28d8b0f43501c9366c37/A in TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
2024-11-20T19:25:54,602 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/4b22b0aeeed44cea8305c86a77f1a8fa, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/83c032181fef45f592235b592983b92e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/d1f2bfb0ab9c4e3f962ae4c91d1095d8] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp, totalSize=109.1 K 2024-11-20T19:25:54,602 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:54,602 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. files: [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/4b22b0aeeed44cea8305c86a77f1a8fa, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/83c032181fef45f592235b592983b92e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/d1f2bfb0ab9c4e3f962ae4c91d1095d8] 2024-11-20T19:25:54,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:54,602 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:54,602 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b22b0aeeed44cea8305c86a77f1a8fa, keycount=150, bloomtype=ROW, size=31.1 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732130751442 2024-11-20T19:25:54,603 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 83c032181fef45f592235b592983b92e, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1732130752074 2024-11-20T19:25:54,603 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37517 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:54,603 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 550c6d15b8cc28d8b0f43501c9366c37/B is initiating minor compaction (all files) 2024-11-20T19:25:54,604 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 
550c6d15b8cc28d8b0f43501c9366c37/B in TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:54,604 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/8dbb37167a5d44cd9a8ed4863352d939, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/375c25c6fc864e2a854f7207944042f6, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/95bc29caab574e738b966b8f0c31d2bd] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp, totalSize=36.6 K 2024-11-20T19:25:54,604 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1f2bfb0ab9c4e3f962ae4c91d1095d8, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732130753299 2024-11-20T19:25:54,604 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 8dbb37167a5d44cd9a8ed4863352d939, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732130751442 2024-11-20T19:25:54,605 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 375c25c6fc864e2a854f7207944042f6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1732130752074 2024-11-20T19:25:54,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 550c6d15b8cc28d8b0f43501c9366c37:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:54,606 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 95bc29caab574e738b966b8f0c31d2bd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732130753299 2024-11-20T19:25:54,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:54,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 550c6d15b8cc28d8b0f43501c9366c37:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:54,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:54,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:54,612 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 550c6d15b8cc28d8b0f43501c9366c37 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:25:54,612 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=A 2024-11-20T19:25:54,612 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
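Note: the compactions being selected above were queued by MemStoreFlusher itself ("Add compact mark for store ... Small Compaction requested: system"), so no operator action is involved; for comparison only, the manual equivalent would be asking for a compaction of one family through the Admin API, with the region server still choosing the store files exactly as the ExploringCompactionPolicy lines show. A hedged sketch (table and family names from the log):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.util.Bytes;

  public class RequestCompaction {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        // Asynchronously request a (minor) compaction of family A of the test table.
        admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("A"));
      }
    }
  }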
2024-11-20T19:25:54,612 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=B 2024-11-20T19:25:54,612 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:54,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=C 2024-11-20T19:25:54,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:54,621 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:54,632 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 550c6d15b8cc28d8b0f43501c9366c37#B#compaction#166 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:54,632 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/213595b1fb104bec84795735db6b275a is 50, key is test_row_0/B:col10/1732130753945/Put/seqid=0 2024-11-20T19:25:54,639 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120f98091d3a7814f588902daf4672777f6_550c6d15b8cc28d8b0f43501c9366c37 store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:54,641 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120f98091d3a7814f588902daf4672777f6_550c6d15b8cc28d8b0f43501c9366c37, store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:54,641 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f98091d3a7814f588902daf4672777f6_550c6d15b8cc28d8b0f43501c9366c37 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:54,642 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:54,643 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T19:25:54,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
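The MOB entries above (DefaultMobStoreCompactor handling the compaction, a MOB writer created and then aborted "because there are no MOB cells") imply that column family A of TestAcidGuarantees is MOB-enabled. As a point of reference only — this is not the test's own setup code — such a family is normally declared through the standard HBase 2.x admin API roughly as in the sketch below. The table name and family A come from the log; the 100-byte threshold and class name are invented for illustration.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Values larger than the MOB threshold are written to separate MOB files,
          // which is why DefaultMobStoreCompactor shows up for family A in the log.
          admin.createTable(
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                      .setMobEnabled(true)
                      .setMobThreshold(100L) // illustrative threshold, not taken from the log
                      .build())
                  .build());
        }
      }
    }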
2024-11-20T19:25:54,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:54,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:54,644 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:54,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:54,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
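The pid=58 entries above are a master-driven flush procedure: the master dispatches FlushRegionCallable to the regionserver, which answers "NOT flushing ... as already flushing", fails the attempt with IOException, and the master re-dispatches until the in-progress flush completes. From client code such a procedure is typically requested with Admin.flush; a minimal sketch, assuming a standard HBase 2.x client and default configuration on the classpath (the class name is hypothetical):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequestSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Submits a flush procedure to the master, which hands FlushRegionCallable
          // to each regionserver hosting a region of the table.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

The repeated "Remote procedure failed, pid=58" lines that follow are the expected retry behaviour while the region is still busy with its earlier flush, not a distinct error.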
2024-11-20T19:25:54,648 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201bf5b5e35873441685940a87e43de33c_550c6d15b8cc28d8b0f43501c9366c37 is 50, key is test_row_0/A:col10/1732130753972/Put/seqid=0 2024-11-20T19:25:54,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742028_1204 (size=13017) 2024-11-20T19:25:54,694 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/213595b1fb104bec84795735db6b275a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/213595b1fb104bec84795735db6b275a 2024-11-20T19:25:54,701 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 550c6d15b8cc28d8b0f43501c9366c37/B of 550c6d15b8cc28d8b0f43501c9366c37 into 213595b1fb104bec84795735db6b275a(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:54,701 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:54,701 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., storeName=550c6d15b8cc28d8b0f43501c9366c37/B, priority=13, startTime=1732130754602; duration=0sec 2024-11-20T19:25:54,701 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:54,701 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 550c6d15b8cc28d8b0f43501c9366c37:B 2024-11-20T19:25:54,701 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:54,705 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37517 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:54,706 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 550c6d15b8cc28d8b0f43501c9366c37/C is initiating minor compaction (all files) 2024-11-20T19:25:54,706 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 550c6d15b8cc28d8b0f43501c9366c37/C in TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
2024-11-20T19:25:54,706 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/4cd5e74764474d78b4988fb254028dbf, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/f70669460e5148ab8a6bba6c8dc2007a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/62ed94897bab49b481058108b2fce75c] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp, totalSize=36.6 K 2024-11-20T19:25:54,706 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 4cd5e74764474d78b4988fb254028dbf, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732130751442 2024-11-20T19:25:54,707 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting f70669460e5148ab8a6bba6c8dc2007a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1732130752074 2024-11-20T19:25:54,707 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 62ed94897bab49b481058108b2fce75c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732130753299 2024-11-20T19:25:54,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742029_1205 (size=12454) 2024-11-20T19:25:54,733 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:54,739 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201bf5b5e35873441685940a87e43de33c_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201bf5b5e35873441685940a87e43de33c_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:54,741 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/d8e6ba54f93d4d41ae1d714b5a17d3a6, store: [table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:54,742 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/d8e6ba54f93d4d41ae1d714b5a17d3a6 is 175, key is test_row_0/A:col10/1732130753972/Put/seqid=0 2024-11-20T19:25:54,748 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 550c6d15b8cc28d8b0f43501c9366c37#C#compaction#168 average throughput 
is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:54,748 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/390149982a1d490e95b58a62d09565aa is 50, key is test_row_0/C:col10/1732130753945/Put/seqid=0 2024-11-20T19:25:54,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:54,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130814752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:54,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742030_1206 (size=4469) 2024-11-20T19:25:54,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742032_1208 (size=13017) 2024-11-20T19:25:54,797 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:54,797 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T19:25:54,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
2024-11-20T19:25:54,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:54,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:54,797 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:54,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:54,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:54,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742031_1207 (size=31255) 2024-11-20T19:25:54,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:54,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130814859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:54,950 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:54,950 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T19:25:54,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:54,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:54,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:54,951 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:54,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:54,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:55,067 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:55,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130815066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:55,104 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:55,105 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T19:25:55,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:55,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:55,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
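The RegionTooBusyException WARN entries show server-side write backpressure: once the region's memstore passes the 512 K blocking limit configured for this test run, incoming Mutate calls are rejected until flushing catches up. The stock HBase client retries these rejections internally, so application code rarely sees them; purely as an illustration, an explicit retry loop would look roughly like the sketch below. The row, family, and qualifier are taken from the log; the value, retry count, and linear backoff are arbitrary assumptions.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);
              break;
            } catch (RegionTooBusyException e) {
              // Memstore over the blocking limit (512 K in this run): back off so
              // flushes and compactions can drain it, then retry the write.
              if (attempt >= 5) {
                throw e;
              }
              Thread.sleep(200L * attempt); // arbitrary linear backoff
            }
          }
        }
      }
    }

Backing off rather than failing immediately is what keeps the ACID-guarantees writers making progress in this test while the server sheds load.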
2024-11-20T19:25:55,105 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:55,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:55,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:55,169 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 550c6d15b8cc28d8b0f43501c9366c37#A#compaction#165 average throughput is 0.04 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:55,170 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/ae74f069859642af922619467a43b867 is 175, key is test_row_0/A:col10/1732130753945/Put/seqid=0 2024-11-20T19:25:55,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742033_1209 (size=31971) 2024-11-20T19:25:55,209 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/390149982a1d490e95b58a62d09565aa as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/390149982a1d490e95b58a62d09565aa 2024-11-20T19:25:55,217 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/ae74f069859642af922619467a43b867 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/ae74f069859642af922619467a43b867 2024-11-20T19:25:55,224 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 550c6d15b8cc28d8b0f43501c9366c37/C of 550c6d15b8cc28d8b0f43501c9366c37 into 390149982a1d490e95b58a62d09565aa(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:55,224 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:55,224 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., storeName=550c6d15b8cc28d8b0f43501c9366c37/C, priority=13, startTime=1732130754607; duration=0sec 2024-11-20T19:25:55,224 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:55,224 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 550c6d15b8cc28d8b0f43501c9366c37:C 2024-11-20T19:25:55,230 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 550c6d15b8cc28d8b0f43501c9366c37/A of 550c6d15b8cc28d8b0f43501c9366c37 into ae74f069859642af922619467a43b867(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:55,230 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:55,230 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., storeName=550c6d15b8cc28d8b0f43501c9366c37/A, priority=13, startTime=1732130754600; duration=0sec 2024-11-20T19:25:55,230 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:55,230 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 550c6d15b8cc28d8b0f43501c9366c37:A 2024-11-20T19:25:55,232 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=336, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/d8e6ba54f93d4d41ae1d714b5a17d3a6 2024-11-20T19:25:55,243 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/326569ca34b54de3804111c3ad3bc73e is 50, key is test_row_0/B:col10/1732130753972/Put/seqid=0 2024-11-20T19:25:55,258 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:55,258 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T19:25:55,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:55,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:55,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:55,259 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:55,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:55,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:55,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742034_1210 (size=12301) 2024-11-20T19:25:55,301 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/326569ca34b54de3804111c3ad3bc73e 2024-11-20T19:25:55,310 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/0b00ab54557b4c1596f98c0c57829aca is 50, key is test_row_0/C:col10/1732130753972/Put/seqid=0 2024-11-20T19:25:55,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742035_1211 (size=12301) 2024-11-20T19:25:55,334 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/0b00ab54557b4c1596f98c0c57829aca 2024-11-20T19:25:55,342 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/d8e6ba54f93d4d41ae1d714b5a17d3a6 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/d8e6ba54f93d4d41ae1d714b5a17d3a6 2024-11-20T19:25:55,350 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/d8e6ba54f93d4d41ae1d714b5a17d3a6, entries=150, sequenceid=336, filesize=30.5 K 2024-11-20T19:25:55,352 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/326569ca34b54de3804111c3ad3bc73e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/326569ca34b54de3804111c3ad3bc73e 2024-11-20T19:25:55,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T19:25:55,360 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/326569ca34b54de3804111c3ad3bc73e, entries=150, sequenceid=336, filesize=12.0 K 2024-11-20T19:25:55,362 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/0b00ab54557b4c1596f98c0c57829aca as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/0b00ab54557b4c1596f98c0c57829aca 2024-11-20T19:25:55,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:55,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130815369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:55,373 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/0b00ab54557b4c1596f98c0c57829aca, entries=150, sequenceid=336, filesize=12.0 K 2024-11-20T19:25:55,375 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 550c6d15b8cc28d8b0f43501c9366c37 in 763ms, sequenceid=336, compaction requested=false 2024-11-20T19:25:55,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:55,412 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:55,413 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T19:25:55,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
2024-11-20T19:25:55,413 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing 550c6d15b8cc28d8b0f43501c9366c37 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:25:55,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=A 2024-11-20T19:25:55,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:55,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=B 2024-11-20T19:25:55,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:55,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=C 2024-11-20T19:25:55,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:55,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a98cf7f46e874729a57c91e12ff5fbb7_550c6d15b8cc28d8b0f43501c9366c37 is 50, key is test_row_0/A:col10/1732130754730/Put/seqid=0 2024-11-20T19:25:55,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:55,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:55,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742036_1212 (size=12454) 2024-11-20T19:25:55,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:55,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130815475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:55,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:55,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130815476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:55,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:55,492 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a98cf7f46e874729a57c91e12ff5fbb7_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a98cf7f46e874729a57c91e12ff5fbb7_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:55,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/03e5bd1f2f8c40be9d85e34ef8307a39, store: [table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:55,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/03e5bd1f2f8c40be9d85e34ef8307a39 is 175, key is test_row_0/A:col10/1732130754730/Put/seqid=0 2024-11-20T19:25:55,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742037_1213 (size=31255) 2024-11-20T19:25:55,582 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:55,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130815581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:55,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:55,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130815581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:55,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:55,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130815602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:55,605 DEBUG [Thread-606 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4154 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., hostname=db9c3a6c6492,41229,1732130701496, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:25:55,786 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:55,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130815784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:55,788 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:55,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130815786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:55,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:55,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130815874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:55,933 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=364, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/03e5bd1f2f8c40be9d85e34ef8307a39 2024-11-20T19:25:55,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/3cc1cafb98cd4394ad2a4ac9aff510a2 is 50, key is test_row_0/B:col10/1732130754730/Put/seqid=0 2024-11-20T19:25:55,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742038_1214 (size=12301) 2024-11-20T19:25:55,982 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/3cc1cafb98cd4394ad2a4ac9aff510a2 2024-11-20T19:25:55,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/7b3a8d75a2fb47f5b3507573c4efbaf7 is 50, key is test_row_0/C:col10/1732130754730/Put/seqid=0 2024-11-20T19:25:56,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742039_1215 (size=12301) 2024-11-20T19:25:56,005 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/7b3a8d75a2fb47f5b3507573c4efbaf7 2024-11-20T19:25:56,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/03e5bd1f2f8c40be9d85e34ef8307a39 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/03e5bd1f2f8c40be9d85e34ef8307a39 2024-11-20T19:25:56,015 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/03e5bd1f2f8c40be9d85e34ef8307a39, entries=150, sequenceid=364, filesize=30.5 K 2024-11-20T19:25:56,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/3cc1cafb98cd4394ad2a4ac9aff510a2 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/3cc1cafb98cd4394ad2a4ac9aff510a2 2024-11-20T19:25:56,022 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/3cc1cafb98cd4394ad2a4ac9aff510a2, entries=150, sequenceid=364, filesize=12.0 K 2024-11-20T19:25:56,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/7b3a8d75a2fb47f5b3507573c4efbaf7 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/7b3a8d75a2fb47f5b3507573c4efbaf7 2024-11-20T19:25:56,031 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/7b3a8d75a2fb47f5b3507573c4efbaf7, entries=150, sequenceid=364, filesize=12.0 K 2024-11-20T19:25:56,033 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 550c6d15b8cc28d8b0f43501c9366c37 in 619ms, sequenceid=364, compaction requested=true 2024-11-20T19:25:56,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:56,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
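The flush that procedure pid=58 just completed (sequenceid=364) is the server-side counterpart of the memstore pressure seen above: checkResources blocks writes once the region memstore exceeds the flush size times the block multiplier, which is consistent with the 512.0 K limit in the exceptions if this test runs with a deliberately small flush size. The sketch below only names the relevant knobs and shows how a flush and a compaction can be requested through the Admin API; the concrete values are assumptions, and in practice these settings belong in the server-side (or test mini-cluster) configuration rather than on a client.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Knobs behind the "Over memstore limit=512.0 K" rejections; the values are assumptions
    // (512 K = 128 K flush size x a block multiplier of 4) and would normally be set in
    // hbase-site.xml or on a mini-cluster's configuration before startup, not on a client.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(table);   // requests a flush of the table's regions, as pid=57/58 do in this log
      admin.compact(table); // queues a minor compaction, like the one requested after this flush
    }
  }
}

Both calls are advisory: the region server performs the actual work asynchronously, which is why the log shows the flush running on the RS_FLUSH_OPERATIONS worker and the compaction being selected later by the short- and long-compaction threads.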
2024-11-20T19:25:56,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-11-20T19:25:56,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-11-20T19:25:56,036 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-11-20T19:25:56,036 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7840 sec 2024-11-20T19:25:56,037 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 2.7890 sec 2024-11-20T19:25:56,092 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 550c6d15b8cc28d8b0f43501c9366c37 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T19:25:56,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=A 2024-11-20T19:25:56,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:56,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=B 2024-11-20T19:25:56,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:56,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=C 2024-11-20T19:25:56,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:56,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:56,119 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d102f773b1ef4e64a71136feef47e17f_550c6d15b8cc28d8b0f43501c9366c37 is 50, key is test_row_0/A:col10/1732130755473/Put/seqid=0 2024-11-20T19:25:56,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742040_1216 (size=14994) 2024-11-20T19:25:56,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:56,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130816148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:56,152 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:56,156 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:56,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130816150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:56,158 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d102f773b1ef4e64a71136feef47e17f_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d102f773b1ef4e64a71136feef47e17f_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:56,160 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/4c8b94b464554bb08ec32c97932e9374, store: [table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:56,160 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/4c8b94b464554bb08ec32c97932e9374 is 175, key is test_row_0/A:col10/1732130755473/Put/seqid=0 2024-11-20T19:25:56,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742041_1217 (size=39949) 2024-11-20T19:25:56,181 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=377, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/4c8b94b464554bb08ec32c97932e9374 2024-11-20T19:25:56,192 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/4978dbd429f542148578fcd054482c68 is 50, key is test_row_0/B:col10/1732130755473/Put/seqid=0 2024-11-20T19:25:56,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742042_1218 (size=12301) 2024-11-20T19:25:56,233 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=377 (bloomFilter=true), 
to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/4978dbd429f542148578fcd054482c68 2024-11-20T19:25:56,245 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/6f5ef2555335497bb2cfec3a5b9ba389 is 50, key is test_row_0/C:col10/1732130755473/Put/seqid=0 2024-11-20T19:25:56,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:56,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130816252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:56,260 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:56,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130816257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:56,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742043_1219 (size=12301) 2024-11-20T19:25:56,295 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/6f5ef2555335497bb2cfec3a5b9ba389 2024-11-20T19:25:56,301 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/4c8b94b464554bb08ec32c97932e9374 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/4c8b94b464554bb08ec32c97932e9374 2024-11-20T19:25:56,309 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/4c8b94b464554bb08ec32c97932e9374, entries=200, sequenceid=377, filesize=39.0 K 2024-11-20T19:25:56,311 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/4978dbd429f542148578fcd054482c68 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/4978dbd429f542148578fcd054482c68 2024-11-20T19:25:56,320 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/4978dbd429f542148578fcd054482c68, entries=150, sequenceid=377, filesize=12.0 K 2024-11-20T19:25:56,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/6f5ef2555335497bb2cfec3a5b9ba389 as 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/6f5ef2555335497bb2cfec3a5b9ba389 2024-11-20T19:25:56,329 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/6f5ef2555335497bb2cfec3a5b9ba389, entries=150, sequenceid=377, filesize=12.0 K 2024-11-20T19:25:56,330 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 550c6d15b8cc28d8b0f43501c9366c37 in 239ms, sequenceid=377, compaction requested=true 2024-11-20T19:25:56,330 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:56,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 550c6d15b8cc28d8b0f43501c9366c37:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:56,330 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:56,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:56,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 550c6d15b8cc28d8b0f43501c9366c37:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:56,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:56,331 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:56,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 550c6d15b8cc28d8b0f43501c9366c37:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:56,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:56,332 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 134430 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:56,332 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49920 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:56,332 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 550c6d15b8cc28d8b0f43501c9366c37/A is initiating minor compaction (all files) 2024-11-20T19:25:56,332 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 550c6d15b8cc28d8b0f43501c9366c37/B is initiating minor compaction (all files) 2024-11-20T19:25:56,332 INFO 
[RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 550c6d15b8cc28d8b0f43501c9366c37/A in TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:56,332 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 550c6d15b8cc28d8b0f43501c9366c37/B in TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:56,332 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/213595b1fb104bec84795735db6b275a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/326569ca34b54de3804111c3ad3bc73e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/3cc1cafb98cd4394ad2a4ac9aff510a2, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/4978dbd429f542148578fcd054482c68] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp, totalSize=48.8 K 2024-11-20T19:25:56,332 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/ae74f069859642af922619467a43b867, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/d8e6ba54f93d4d41ae1d714b5a17d3a6, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/03e5bd1f2f8c40be9d85e34ef8307a39, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/4c8b94b464554bb08ec32c97932e9374] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp, totalSize=131.3 K 2024-11-20T19:25:56,332 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:56,332 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
files: [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/ae74f069859642af922619467a43b867, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/d8e6ba54f93d4d41ae1d714b5a17d3a6, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/03e5bd1f2f8c40be9d85e34ef8307a39, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/4c8b94b464554bb08ec32c97932e9374] 2024-11-20T19:25:56,332 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 213595b1fb104bec84795735db6b275a, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732130753299 2024-11-20T19:25:56,333 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae74f069859642af922619467a43b867, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732130753299 2024-11-20T19:25:56,333 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 326569ca34b54de3804111c3ad3bc73e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1732130753972 2024-11-20T19:25:56,333 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8e6ba54f93d4d41ae1d714b5a17d3a6, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1732130753972 2024-11-20T19:25:56,333 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 3cc1cafb98cd4394ad2a4ac9aff510a2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1732130754730 2024-11-20T19:25:56,333 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03e5bd1f2f8c40be9d85e34ef8307a39, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1732130754730 2024-11-20T19:25:56,333 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 4978dbd429f542148578fcd054482c68, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732130755473 2024-11-20T19:25:56,334 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c8b94b464554bb08ec32c97932e9374, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732130755473 2024-11-20T19:25:56,348 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:56,358 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 550c6d15b8cc28d8b0f43501c9366c37#B#compaction#178 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:56,359 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/25080712d4fa4dfe9ce55d378ac0844f is 50, key is test_row_0/B:col10/1732130755473/Put/seqid=0 2024-11-20T19:25:56,372 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411206bb31beedbb04c0baf1e45c204cae35e_550c6d15b8cc28d8b0f43501c9366c37 store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:56,374 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411206bb31beedbb04c0baf1e45c204cae35e_550c6d15b8cc28d8b0f43501c9366c37, store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:56,375 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411206bb31beedbb04c0baf1e45c204cae35e_550c6d15b8cc28d8b0f43501c9366c37 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:56,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742044_1220 (size=13153) 2024-11-20T19:25:56,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:56,408 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 550c6d15b8cc28d8b0f43501c9366c37 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T19:25:56,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=A 2024-11-20T19:25:56,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:56,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=B 2024-11-20T19:25:56,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:56,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=C 2024-11-20T19:25:56,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:56,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742045_1221 (size=4469) 2024-11-20T19:25:56,423 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 550c6d15b8cc28d8b0f43501c9366c37#A#compaction#177 average throughput is 0.33 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:56,423 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/d1ec3699f38f4b61b22c57507009704f is 175, key is test_row_0/A:col10/1732130755473/Put/seqid=0 2024-11-20T19:25:56,425 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202e99da3b04714e15828adc52713fc84e_550c6d15b8cc28d8b0f43501c9366c37 is 50, key is test_row_0/A:col10/1732130756407/Put/seqid=0 2024-11-20T19:25:56,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742046_1222 (size=32107) 2024-11-20T19:25:56,458 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/d1ec3699f38f4b61b22c57507009704f as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/d1ec3699f38f4b61b22c57507009704f 2024-11-20T19:25:56,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:56,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130816457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:56,463 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:56,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130816462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:56,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:56,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130816463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:56,474 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 550c6d15b8cc28d8b0f43501c9366c37/A of 550c6d15b8cc28d8b0f43501c9366c37 into d1ec3699f38f4b61b22c57507009704f(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:56,474 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:56,474 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., storeName=550c6d15b8cc28d8b0f43501c9366c37/A, priority=12, startTime=1732130756330; duration=0sec 2024-11-20T19:25:56,474 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:56,474 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 550c6d15b8cc28d8b0f43501c9366c37:A 2024-11-20T19:25:56,474 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:56,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742047_1223 (size=14994) 2024-11-20T19:25:56,475 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:56,477 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49920 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:56,477 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] 
regionserver.HStore(1540): 550c6d15b8cc28d8b0f43501c9366c37/C is initiating minor compaction (all files) 2024-11-20T19:25:56,477 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 550c6d15b8cc28d8b0f43501c9366c37/C in TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:56,477 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/390149982a1d490e95b58a62d09565aa, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/0b00ab54557b4c1596f98c0c57829aca, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/7b3a8d75a2fb47f5b3507573c4efbaf7, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/6f5ef2555335497bb2cfec3a5b9ba389] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp, totalSize=48.8 K 2024-11-20T19:25:56,478 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 390149982a1d490e95b58a62d09565aa, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732130753299 2024-11-20T19:25:56,479 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0b00ab54557b4c1596f98c0c57829aca, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1732130753972 2024-11-20T19:25:56,479 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b3a8d75a2fb47f5b3507573c4efbaf7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1732130754730 2024-11-20T19:25:56,480 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6f5ef2555335497bb2cfec3a5b9ba389, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732130755473 2024-11-20T19:25:56,481 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202e99da3b04714e15828adc52713fc84e_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202e99da3b04714e15828adc52713fc84e_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:56,483 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/484704d11c5545bda6a8ee237b0daff5, store: [table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:56,484 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/484704d11c5545bda6a8ee237b0daff5 is 175, key is test_row_0/A:col10/1732130756407/Put/seqid=0 2024-11-20T19:25:56,504 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 550c6d15b8cc28d8b0f43501c9366c37#C#compaction#180 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:56,504 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/f61340ea5b72467986a15e1444e814f6 is 50, key is test_row_0/C:col10/1732130755473/Put/seqid=0 2024-11-20T19:25:56,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742048_1224 (size=39949) 2024-11-20T19:25:56,566 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:56,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130816565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:56,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:56,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130816565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:56,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742049_1225 (size=13153) 2024-11-20T19:25:56,773 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:56,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130816764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:56,773 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:56,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130816768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:56,776 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:56,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130816769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:56,812 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/25080712d4fa4dfe9ce55d378ac0844f as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/25080712d4fa4dfe9ce55d378ac0844f 2024-11-20T19:25:56,819 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 550c6d15b8cc28d8b0f43501c9366c37/B of 550c6d15b8cc28d8b0f43501c9366c37 into 25080712d4fa4dfe9ce55d378ac0844f(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:56,819 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:56,819 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., storeName=550c6d15b8cc28d8b0f43501c9366c37/B, priority=12, startTime=1732130756331; duration=0sec 2024-11-20T19:25:56,820 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:56,820 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 550c6d15b8cc28d8b0f43501c9366c37:B 2024-11-20T19:25:56,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:56,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130816888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:56,930 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=401, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/484704d11c5545bda6a8ee237b0daff5 2024-11-20T19:25:56,950 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/a6ad61ec84d344e8998ae3d3a265b345 is 50, key is test_row_0/B:col10/1732130756407/Put/seqid=0 2024-11-20T19:25:56,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742050_1226 (size=12301) 2024-11-20T19:25:56,967 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=401 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/a6ad61ec84d344e8998ae3d3a265b345 2024-11-20T19:25:57,003 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/c46ae3db97c14caf8c365e49006695ea is 50, key is test_row_0/C:col10/1732130756407/Put/seqid=0 2024-11-20T19:25:57,031 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/f61340ea5b72467986a15e1444e814f6 as 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/f61340ea5b72467986a15e1444e814f6 2024-11-20T19:25:57,042 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 550c6d15b8cc28d8b0f43501c9366c37/C of 550c6d15b8cc28d8b0f43501c9366c37 into f61340ea5b72467986a15e1444e814f6(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:57,042 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:57,042 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., storeName=550c6d15b8cc28d8b0f43501c9366c37/C, priority=12, startTime=1732130756331; duration=0sec 2024-11-20T19:25:57,042 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:57,042 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 550c6d15b8cc28d8b0f43501c9366c37:C 2024-11-20T19:25:57,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742051_1227 (size=12301) 2024-11-20T19:25:57,052 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=401 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/c46ae3db97c14caf8c365e49006695ea 2024-11-20T19:25:57,060 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/484704d11c5545bda6a8ee237b0daff5 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/484704d11c5545bda6a8ee237b0daff5 2024-11-20T19:25:57,068 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/484704d11c5545bda6a8ee237b0daff5, entries=200, sequenceid=401, filesize=39.0 K 2024-11-20T19:25:57,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/a6ad61ec84d344e8998ae3d3a265b345 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/a6ad61ec84d344e8998ae3d3a265b345 2024-11-20T19:25:57,075 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/a6ad61ec84d344e8998ae3d3a265b345, 
entries=150, sequenceid=401, filesize=12.0 K 2024-11-20T19:25:57,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/c46ae3db97c14caf8c365e49006695ea as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/c46ae3db97c14caf8c365e49006695ea 2024-11-20T19:25:57,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:57,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130817074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:57,083 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:57,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130817081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:57,094 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/c46ae3db97c14caf8c365e49006695ea, entries=150, sequenceid=401, filesize=12.0 K 2024-11-20T19:25:57,095 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 550c6d15b8cc28d8b0f43501c9366c37 in 687ms, sequenceid=401, compaction requested=false 2024-11-20T19:25:57,095 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:57,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:57,290 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 550c6d15b8cc28d8b0f43501c9366c37 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T19:25:57,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=A 2024-11-20T19:25:57,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:57,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=B 2024-11-20T19:25:57,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:57,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=C 2024-11-20T19:25:57,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:57,321 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203cc2d04ca8d64be6a5d0940535793822_550c6d15b8cc28d8b0f43501c9366c37 is 50, key is test_row_0/A:col10/1732130756461/Put/seqid=0 2024-11-20T19:25:57,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T19:25:57,355 INFO [Thread-610 {}] 
client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-11-20T19:25:57,357 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:57,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-11-20T19:25:57,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T19:25:57,359 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:57,360 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:57,360 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:57,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742052_1228 (size=14994) 2024-11-20T19:25:57,381 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:57,388 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203cc2d04ca8d64be6a5d0940535793822_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203cc2d04ca8d64be6a5d0940535793822_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:57,389 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/e06bb9c6af7f4bc4b62c5299f8638718, store: [table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:57,390 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/e06bb9c6af7f4bc4b62c5299f8638718 is 175, key is test_row_0/A:col10/1732130756461/Put/seqid=0 2024-11-20T19:25:57,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742053_1229 (size=39949) 2024-11-20T19:25:57,444 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:57,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130817443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:57,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T19:25:57,512 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:57,512 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-20T19:25:57,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:57,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:57,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:57,512 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:57,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:57,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:57,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:57,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130817560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:57,587 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:57,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130817583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:57,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:57,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130817589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:57,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T19:25:57,665 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:57,666 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-20T19:25:57,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:57,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:57,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:57,666 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:57,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:57,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:57,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:57,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130817762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:57,818 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:57,819 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-20T19:25:57,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:57,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
as already flushing 2024-11-20T19:25:57,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:57,819 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:57,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:57,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:57,829 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=417, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/e06bb9c6af7f4bc4b62c5299f8638718 2024-11-20T19:25:57,842 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/3bcef55ecae040ac860a02c171bb5f02 is 50, key is test_row_0/B:col10/1732130756461/Put/seqid=0 2024-11-20T19:25:57,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742054_1230 (size=12301) 2024-11-20T19:25:57,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T19:25:57,971 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:57,972 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-20T19:25:57,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
2024-11-20T19:25:57,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:57,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:57,972 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:57,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
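The cycle above — the master dispatching FlushRegionCallable for pid=60, the region server answering "NOT flushing ... as already flushing", and the result reporter marking the procedure as failed — repeats while the flush started by MemStoreFlusher.0 is still writing its files. A flush of this kind is normally requested through the client Admin API; the following is a minimal, hypothetical sketch of such a request (only the table name is taken from the log, the connection setup is assumed):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush every region of the table; on the server
            // side this is what turns into a flush-region procedure such as pid=60.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}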
2024-11-20T19:25:57,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:58,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130818069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:58,125 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:58,126 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-20T19:25:58,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:58,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:58,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:58,126 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
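Each RegionTooBusyException above is returned to the writing client, which is expected to back off and retry once the in-flight flush frees memstore space. The stock HBase client does this retrying internally (and may surface the error wrapped in a retries-exhausted exception); a hand-rolled equivalent could look like the sketch below, where the backoff values are assumptions rather than anything read from the log:

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class BusyRegionRetrySketch {
    // Retries a single Put a few times when the region reports it is over its
    // memstore blocking limit, backing off between attempts.
    static void putWithRetry(Table table, Put put, int maxAttempts) throws Exception {
        for (int attempt = 1; ; attempt++) {
            try {
                table.put(put);
                return;
            } catch (RegionTooBusyException e) {
                if (attempt >= maxAttempts) {
                    throw e;
                }
                Thread.sleep(100L * attempt); // assumed backoff, not from the log
            }
        }
    }
}

In this run the writer threads simply keep issuing such puts, which is why the CallRunner lines keep logging rejected Mutate calls with increasing callIds (209, 176, 115, 211, ...).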
2024-11-20T19:25:58,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:58,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:58,259 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=417 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/3bcef55ecae040ac860a02c171bb5f02 2024-11-20T19:25:58,275 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/fc4f4be882e24c4e8f451744e99ce501 is 50, key is test_row_0/C:col10/1732130756461/Put/seqid=0 2024-11-20T19:25:58,279 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:58,280 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-20T19:25:58,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:58,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:58,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:58,280 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
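The 512.0 K blocking limit that keeps appearing in these RegionTooBusyException messages is the product of the per-region memstore flush size and the block multiplier; the test evidently runs with a much smaller flush size than the production default. A hedged illustration of the two standard settings involved follows — the 128 KB value is chosen only so the arithmetic matches the 512 KB limit seen here, it is not read from the test configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches this many bytes.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        // Block new updates (RegionTooBusyException) once the memstore grows past
        // flush.size * block.multiplier -- 512 KB with these illustrative values.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    }
}

When the memstore of 550c6d15b8cc28d8b0f43501c9366c37 passes that blocking threshold, HRegion.checkResources rejects new mutations until the flush in progress completes, which is exactly the pattern in this stretch of the log.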
2024-11-20T19:25:58,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:58,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:58,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742055_1231 (size=12301) 2024-11-20T19:25:58,316 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=417 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/fc4f4be882e24c4e8f451744e99ce501 2024-11-20T19:25:58,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/e06bb9c6af7f4bc4b62c5299f8638718 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/e06bb9c6af7f4bc4b62c5299f8638718 2024-11-20T19:25:58,327 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/e06bb9c6af7f4bc4b62c5299f8638718, entries=200, sequenceid=417, filesize=39.0 K 2024-11-20T19:25:58,329 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/3bcef55ecae040ac860a02c171bb5f02 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/3bcef55ecae040ac860a02c171bb5f02 2024-11-20T19:25:58,336 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/3bcef55ecae040ac860a02c171bb5f02, entries=150, sequenceid=417, filesize=12.0 K 2024-11-20T19:25:58,340 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/fc4f4be882e24c4e8f451744e99ce501 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/fc4f4be882e24c4e8f451744e99ce501 2024-11-20T19:25:58,348 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/fc4f4be882e24c4e8f451744e99ce501, entries=150, sequenceid=417, filesize=12.0 K 2024-11-20T19:25:58,349 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 550c6d15b8cc28d8b0f43501c9366c37 in 1060ms, sequenceid=417, compaction requested=true 2024-11-20T19:25:58,349 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:58,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 550c6d15b8cc28d8b0f43501c9366c37:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:58,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:58,349 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:58,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 550c6d15b8cc28d8b0f43501c9366c37:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:58,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:58,349 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:58,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 550c6d15b8cc28d8b0f43501c9366c37:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:58,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:58,351 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 112005 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:58,351 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:58,351 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 550c6d15b8cc28d8b0f43501c9366c37/B is initiating minor compaction (all files) 2024-11-20T19:25:58,351 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 550c6d15b8cc28d8b0f43501c9366c37/A is initiating minor compaction (all files) 2024-11-20T19:25:58,351 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 550c6d15b8cc28d8b0f43501c9366c37/A in TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:58,351 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 550c6d15b8cc28d8b0f43501c9366c37/B in TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
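The "Exploring compaction algorithm has selected 3 files" lines above come from HBase's store-file selection policy. The helper below is not that policy; it is only a simplified, hypothetical illustration of the size-ratio criterion being applied, using approximate sizes for the three B-store candidates (totalSize=36.9 K in the log) and the commonly used default ratio of 1.2:

import java.util.List;

public class CompactionRatioSketch {
    // Simplified version of the ratio test used when picking store files: a file
    // is kept as a minor-compaction candidate only if it is not much larger than
    // the combined size of the other candidates.
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximate sizes of the three B-store HFiles selected above (bytes).
        System.out.println(filesInRatio(List.of(13153L, 12301L, 12301L), 1.2));
    }
}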
2024-11-20T19:25:58,351 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/25080712d4fa4dfe9ce55d378ac0844f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/a6ad61ec84d344e8998ae3d3a265b345, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/3bcef55ecae040ac860a02c171bb5f02] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp, totalSize=36.9 K 2024-11-20T19:25:58,351 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/d1ec3699f38f4b61b22c57507009704f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/484704d11c5545bda6a8ee237b0daff5, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/e06bb9c6af7f4bc4b62c5299f8638718] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp, totalSize=109.4 K 2024-11-20T19:25:58,351 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:58,351 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
files: [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/d1ec3699f38f4b61b22c57507009704f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/484704d11c5545bda6a8ee237b0daff5, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/e06bb9c6af7f4bc4b62c5299f8638718] 2024-11-20T19:25:58,352 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 25080712d4fa4dfe9ce55d378ac0844f, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732130755473 2024-11-20T19:25:58,352 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting a6ad61ec84d344e8998ae3d3a265b345, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=401, earliestPutTs=1732130756132 2024-11-20T19:25:58,352 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1ec3699f38f4b61b22c57507009704f, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732130755473 2024-11-20T19:25:58,352 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 484704d11c5545bda6a8ee237b0daff5, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=401, earliestPutTs=1732130756132 2024-11-20T19:25:58,352 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 3bcef55ecae040ac860a02c171bb5f02, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=417, earliestPutTs=1732130756456 2024-11-20T19:25:58,354 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting e06bb9c6af7f4bc4b62c5299f8638718, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=417, earliestPutTs=1732130756446 2024-11-20T19:25:58,373 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 550c6d15b8cc28d8b0f43501c9366c37#B#compaction#186 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:58,374 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/66abe1b5857d4846a8cc244b8f9d83c0 is 50, key is test_row_0/B:col10/1732130756461/Put/seqid=0 2024-11-20T19:25:58,387 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:58,408 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120761a87f26e3c4b94875548076246eec3_550c6d15b8cc28d8b0f43501c9366c37 store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:58,410 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120761a87f26e3c4b94875548076246eec3_550c6d15b8cc28d8b0f43501c9366c37, store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:58,411 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120761a87f26e3c4b94875548076246eec3_550c6d15b8cc28d8b0f43501c9366c37 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:58,432 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:58,433 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-20T19:25:58,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
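The DefaultMobStoreFlusher/DefaultMobStoreCompactor entries above indicate that column family A is MOB-enabled in this test, and that the compactor aborted its MOB writer because no cells exceeded the MOB threshold. Declaring such a family on a table descriptor might look like the following sketch; the 100 KB threshold is an assumption for illustration, not taken from the test:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
    public static void main(String[] args) {
        // Values in family "A" above the threshold are stored as MOB files, which
        // is why the MOB-aware flusher and compactor handle this store in the log.
        ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)
            .setMobThreshold(100 * 1024) // assumed threshold, for illustration
            .build();
        TableDescriptor table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(mobFamily)
            .build();
        System.out.println(table);
    }
}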
2024-11-20T19:25:58,435 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing 550c6d15b8cc28d8b0f43501c9366c37 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T19:25:58,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=A 2024-11-20T19:25:58,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:58,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=B 2024-11-20T19:25:58,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:58,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=C 2024-11-20T19:25:58,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:58,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742056_1232 (size=13255) 2024-11-20T19:25:58,449 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/66abe1b5857d4846a8cc244b8f9d83c0 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/66abe1b5857d4846a8cc244b8f9d83c0 2024-11-20T19:25:58,455 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 550c6d15b8cc28d8b0f43501c9366c37/B of 550c6d15b8cc28d8b0f43501c9366c37 into 66abe1b5857d4846a8cc244b8f9d83c0(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
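Once a compaction like the B-store one above completes, its progress can also be observed from a client by polling the admin interface. A minimal, hypothetical polling sketch (table name from the log, polling interval assumed):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionStateSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Poll until no region of the table reports a running compaction.
            while (admin.getCompactionState(table) != CompactionState.NONE) {
                Thread.sleep(1000L); // assumed polling interval
            }
        }
    }
}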
2024-11-20T19:25:58,455 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:58,455 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., storeName=550c6d15b8cc28d8b0f43501c9366c37/B, priority=13, startTime=1732130758349; duration=0sec 2024-11-20T19:25:58,455 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:58,455 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 550c6d15b8cc28d8b0f43501c9366c37:B 2024-11-20T19:25:58,455 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:58,456 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:58,456 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 550c6d15b8cc28d8b0f43501c9366c37/C is initiating minor compaction (all files) 2024-11-20T19:25:58,457 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 550c6d15b8cc28d8b0f43501c9366c37/C in TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:58,457 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/f61340ea5b72467986a15e1444e814f6, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/c46ae3db97c14caf8c365e49006695ea, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/fc4f4be882e24c4e8f451744e99ce501] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp, totalSize=36.9 K 2024-11-20T19:25:58,457 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting f61340ea5b72467986a15e1444e814f6, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732130755473 2024-11-20T19:25:58,457 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting c46ae3db97c14caf8c365e49006695ea, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=401, earliestPutTs=1732130756132 2024-11-20T19:25:58,458 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting fc4f4be882e24c4e8f451744e99ce501, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=417, earliestPutTs=1732130756456 2024-11-20T19:25:58,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=59 2024-11-20T19:25:58,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742057_1233 (size=4469) 2024-11-20T19:25:58,469 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 550c6d15b8cc28d8b0f43501c9366c37#A#compaction#187 average throughput is 0.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:58,470 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/1e4ab25e9c93490db014ea3819bd1cf6 is 175, key is test_row_0/A:col10/1732130756461/Put/seqid=0 2024-11-20T19:25:58,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112047c9effbeb114c66819a90711448f5fd_550c6d15b8cc28d8b0f43501c9366c37 is 50, key is test_row_0/A:col10/1732130757434/Put/seqid=0 2024-11-20T19:25:58,485 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 550c6d15b8cc28d8b0f43501c9366c37#C#compaction#189 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:58,486 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/2193499af78c455b8caf838d0f9a4dba is 50, key is test_row_0/C:col10/1732130756461/Put/seqid=0 2024-11-20T19:25:58,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742058_1234 (size=32209) 2024-11-20T19:25:58,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742059_1235 (size=12454) 2024-11-20T19:25:58,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:58,561 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112047c9effbeb114c66819a90711448f5fd_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112047c9effbeb114c66819a90711448f5fd_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:58,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/c5a0bde1e9e04f81ace78c00a36086c1, store: [table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:58,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/c5a0bde1e9e04f81ace78c00a36086c1 is 175, key is test_row_0/A:col10/1732130757434/Put/seqid=0 2024-11-20T19:25:58,574 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:58,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:58,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742060_1236 (size=13255) 2024-11-20T19:25:58,581 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/2193499af78c455b8caf838d0f9a4dba as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/2193499af78c455b8caf838d0f9a4dba 2024-11-20T19:25:58,587 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 550c6d15b8cc28d8b0f43501c9366c37/C of 550c6d15b8cc28d8b0f43501c9366c37 into 2193499af78c455b8caf838d0f9a4dba(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:58,588 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:58,588 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., storeName=550c6d15b8cc28d8b0f43501c9366c37/C, priority=13, startTime=1732130758349; duration=0sec 2024-11-20T19:25:58,588 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:58,588 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 550c6d15b8cc28d8b0f43501c9366c37:C 2024-11-20T19:25:58,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742061_1237 (size=31255) 2024-11-20T19:25:58,612 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130818610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:58,613 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130818610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:58,613 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130818612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:58,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130818714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:58,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130818714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:58,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130818714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:58,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37602 deadline: 1732130818893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:58,897 DEBUG [Thread-602 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4145 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., hostname=db9c3a6c6492,41229,1732130701496, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:25:58,921 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130818919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:58,921 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130818919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:58,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130818920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:58,934 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/1e4ab25e9c93490db014ea3819bd1cf6 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/1e4ab25e9c93490db014ea3819bd1cf6 2024-11-20T19:25:58,958 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 550c6d15b8cc28d8b0f43501c9366c37/A of 550c6d15b8cc28d8b0f43501c9366c37 into 1e4ab25e9c93490db014ea3819bd1cf6(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:58,958 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:58,958 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., storeName=550c6d15b8cc28d8b0f43501c9366c37/A, priority=13, startTime=1732130758349; duration=0sec 2024-11-20T19:25:58,958 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:58,958 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 550c6d15b8cc28d8b0f43501c9366c37:A 2024-11-20T19:25:59,007 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=440, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/c5a0bde1e9e04f81ace78c00a36086c1 2024-11-20T19:25:59,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/5f44807f7da04a9ba8e6e6268c2cbdf0 is 50, key is test_row_0/B:col10/1732130757434/Put/seqid=0 2024-11-20T19:25:59,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742062_1238 (size=12301) 2024-11-20T19:25:59,054 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=440 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/5f44807f7da04a9ba8e6e6268c2cbdf0 2024-11-20T19:25:59,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/542f4033fd424801b8e548592f20063f is 50, key is test_row_0/C:col10/1732130757434/Put/seqid=0 2024-11-20T19:25:59,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742063_1239 (size=12301) 2024-11-20T19:25:59,110 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=440 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/542f4033fd424801b8e548592f20063f 2024-11-20T19:25:59,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/c5a0bde1e9e04f81ace78c00a36086c1 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/c5a0bde1e9e04f81ace78c00a36086c1 2024-11-20T19:25:59,127 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/c5a0bde1e9e04f81ace78c00a36086c1, entries=150, sequenceid=440, filesize=30.5 K 2024-11-20T19:25:59,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/5f44807f7da04a9ba8e6e6268c2cbdf0 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/5f44807f7da04a9ba8e6e6268c2cbdf0 2024-11-20T19:25:59,137 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/5f44807f7da04a9ba8e6e6268c2cbdf0, entries=150, sequenceid=440, filesize=12.0 K 2024-11-20T19:25:59,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/542f4033fd424801b8e548592f20063f as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/542f4033fd424801b8e548592f20063f 2024-11-20T19:25:59,149 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/542f4033fd424801b8e548592f20063f, entries=150, sequenceid=440, filesize=12.0 K 2024-11-20T19:25:59,150 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 550c6d15b8cc28d8b0f43501c9366c37 in 716ms, sequenceid=440, compaction requested=false 2024-11-20T19:25:59,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:59,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
2024-11-20T19:25:59,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-11-20T19:25:59,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-11-20T19:25:59,155 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-11-20T19:25:59,155 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7920 sec 2024-11-20T19:25:59,156 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 1.7980 sec 2024-11-20T19:25:59,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:59,235 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 550c6d15b8cc28d8b0f43501c9366c37 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T19:25:59,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=A 2024-11-20T19:25:59,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:59,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=B 2024-11-20T19:25:59,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:59,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=C 2024-11-20T19:25:59,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:59,244 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e3b28d5a4ff74d5cbdeb06693cd4fd49_550c6d15b8cc28d8b0f43501c9366c37 is 50, key is test_row_0/A:col10/1732130759225/Put/seqid=0 2024-11-20T19:25:59,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742064_1240 (size=12454) 2024-11-20T19:25:59,273 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:59,276 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130819270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:59,276 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130819270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:59,280 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e3b28d5a4ff74d5cbdeb06693cd4fd49_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e3b28d5a4ff74d5cbdeb06693cd4fd49_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:59,282 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/1000095174d64a43b23b8f10bf5c232e, store: [table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:59,282 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130819276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:59,283 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/1000095174d64a43b23b8f10bf5c232e is 175, key is test_row_0/A:col10/1732130759225/Put/seqid=0 2024-11-20T19:25:59,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742065_1241 (size=31255) 2024-11-20T19:25:59,328 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=458, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/1000095174d64a43b23b8f10bf5c232e 2024-11-20T19:25:59,351 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/7ad6833f24f04a838623a03ad400487b is 50, key is test_row_0/B:col10/1732130759225/Put/seqid=0 2024-11-20T19:25:59,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130819377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:59,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130819377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:59,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742066_1242 (size=12301) 2024-11-20T19:25:59,385 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=458 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/7ad6833f24f04a838623a03ad400487b 2024-11-20T19:25:59,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130819385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:59,404 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/46334b42977a48848d150c5219345a12 is 50, key is test_row_0/C:col10/1732130759225/Put/seqid=0 2024-11-20T19:25:59,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742067_1243 (size=12301) 2024-11-20T19:25:59,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T19:25:59,463 INFO [Thread-610 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-11-20T19:25:59,465 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:59,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees 2024-11-20T19:25:59,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T19:25:59,468 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:59,469 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:59,469 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:59,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T19:25:59,595 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130819594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:59,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,596 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130819595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:59,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130819594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:59,625 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:59,626 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T19:25:59,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:59,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:59,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:59,626 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,627 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:59,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37542 deadline: 1732130819625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:59,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:59,627 DEBUG [Thread-606 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8176 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., hostname=db9c3a6c6492,41229,1732130701496, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at 
org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at 
org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:25:59,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:59,773 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T19:25:59,779 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:59,779 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T19:25:59,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:59,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:59,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:59,779 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:59,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:59,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:59,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T19:25:59,837 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=458 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/46334b42977a48848d150c5219345a12 2024-11-20T19:25:59,842 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/1000095174d64a43b23b8f10bf5c232e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/1000095174d64a43b23b8f10bf5c232e 2024-11-20T19:25:59,851 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/1000095174d64a43b23b8f10bf5c232e, entries=150, sequenceid=458, filesize=30.5 K 2024-11-20T19:25:59,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/7ad6833f24f04a838623a03ad400487b as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/7ad6833f24f04a838623a03ad400487b 2024-11-20T19:25:59,870 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/7ad6833f24f04a838623a03ad400487b, entries=150, sequenceid=458, filesize=12.0 K 2024-11-20T19:25:59,872 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/46334b42977a48848d150c5219345a12 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/46334b42977a48848d150c5219345a12 2024-11-20T19:25:59,878 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/46334b42977a48848d150c5219345a12, entries=150, sequenceid=458, filesize=12.0 K 2024-11-20T19:25:59,879 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 550c6d15b8cc28d8b0f43501c9366c37 in 644ms, sequenceid=458, compaction requested=true 2024-11-20T19:25:59,879 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:25:59,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 550c6d15b8cc28d8b0f43501c9366c37:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:59,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:59,880 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:59,880 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:59,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 550c6d15b8cc28d8b0f43501c9366c37:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:59,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:59,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 550c6d15b8cc28d8b0f43501c9366c37:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:59,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:59,881 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94719 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:59,881 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 550c6d15b8cc28d8b0f43501c9366c37/A is initiating minor 
compaction (all files) 2024-11-20T19:25:59,881 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:59,881 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 550c6d15b8cc28d8b0f43501c9366c37/A in TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:59,881 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 550c6d15b8cc28d8b0f43501c9366c37/B is initiating minor compaction (all files) 2024-11-20T19:25:59,881 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 550c6d15b8cc28d8b0f43501c9366c37/B in TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:59,881 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/1e4ab25e9c93490db014ea3819bd1cf6, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/c5a0bde1e9e04f81ace78c00a36086c1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/1000095174d64a43b23b8f10bf5c232e] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp, totalSize=92.5 K 2024-11-20T19:25:59,881 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/66abe1b5857d4846a8cc244b8f9d83c0, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/5f44807f7da04a9ba8e6e6268c2cbdf0, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/7ad6833f24f04a838623a03ad400487b] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp, totalSize=37.0 K 2024-11-20T19:25:59,881 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:59,881 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
files: [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/1e4ab25e9c93490db014ea3819bd1cf6, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/c5a0bde1e9e04f81ace78c00a36086c1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/1000095174d64a43b23b8f10bf5c232e] 2024-11-20T19:25:59,882 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 66abe1b5857d4846a8cc244b8f9d83c0, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=417, earliestPutTs=1732130756456 2024-11-20T19:25:59,883 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f44807f7da04a9ba8e6e6268c2cbdf0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=440, earliestPutTs=1732130757385 2024-11-20T19:25:59,883 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e4ab25e9c93490db014ea3819bd1cf6, keycount=150, bloomtype=ROW, size=31.5 K, encoding=NONE, compression=NONE, seqNum=417, earliestPutTs=1732130756456 2024-11-20T19:25:59,883 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting c5a0bde1e9e04f81ace78c00a36086c1, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=440, earliestPutTs=1732130757385 2024-11-20T19:25:59,883 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ad6833f24f04a838623a03ad400487b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=458, earliestPutTs=1732130758610 2024-11-20T19:25:59,883 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1000095174d64a43b23b8f10bf5c232e, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=458, earliestPutTs=1732130758610 2024-11-20T19:25:59,894 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:59,896 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 550c6d15b8cc28d8b0f43501c9366c37#B#compaction#196 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:59,897 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/01868bb07f5d4013afd18e35abd0fa54 is 50, key is test_row_0/B:col10/1732130759225/Put/seqid=0 2024-11-20T19:25:59,903 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 550c6d15b8cc28d8b0f43501c9366c37 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T19:25:59,903 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120b54b02ac9ad0455ebbe0e96de05732ba_550c6d15b8cc28d8b0f43501c9366c37 store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:59,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=A 2024-11-20T19:25:59,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:59,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=B 2024-11-20T19:25:59,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:59,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=C 2024-11-20T19:25:59,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:59,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:25:59,905 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120b54b02ac9ad0455ebbe0e96de05732ba_550c6d15b8cc28d8b0f43501c9366c37, store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:59,905 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b54b02ac9ad0455ebbe0e96de05732ba_550c6d15b8cc28d8b0f43501c9366c37 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:25:59,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742068_1244 (size=13357) 2024-11-20T19:25:59,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742069_1245 (size=4469) 2024-11-20T19:25:59,929 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130819924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:59,930 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130819925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:59,930 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130819926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:59,932 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:25:59,934 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T19:25:59,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
2024-11-20T19:25:59,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:25:59,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:25:59,935 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:59,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:59,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:59,939 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205fb3c2bcba6c452b911305eb20f9e2c2_550c6d15b8cc28d8b0f43501c9366c37 is 50, key is test_row_0/A:col10/1732130759900/Put/seqid=0 2024-11-20T19:25:59,940 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 550c6d15b8cc28d8b0f43501c9366c37#A#compaction#195 average throughput is 0.53 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:59,941 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/9b3715d8a7554d239a49b61d633d51a3 is 175, key is test_row_0/A:col10/1732130759225/Put/seqid=0 2024-11-20T19:25:59,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742070_1246 (size=17534) 2024-11-20T19:26:00,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742071_1247 (size=32311) 2024-11-20T19:26:00,015 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/9b3715d8a7554d239a49b61d633d51a3 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/9b3715d8a7554d239a49b61d633d51a3 2024-11-20T19:26:00,022 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 550c6d15b8cc28d8b0f43501c9366c37/A of 550c6d15b8cc28d8b0f43501c9366c37 into 9b3715d8a7554d239a49b61d633d51a3(size=31.6 K), total size for store is 31.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:00,022 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:26:00,022 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., storeName=550c6d15b8cc28d8b0f43501c9366c37/A, priority=13, startTime=1732130759880; duration=0sec 2024-11-20T19:26:00,022 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:00,022 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 550c6d15b8cc28d8b0f43501c9366c37:A 2024-11-20T19:26:00,022 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:00,024 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:00,024 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 550c6d15b8cc28d8b0f43501c9366c37/C is initiating minor compaction (all files) 2024-11-20T19:26:00,024 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 550c6d15b8cc28d8b0f43501c9366c37/C in TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
2024-11-20T19:26:00,024 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/2193499af78c455b8caf838d0f9a4dba, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/542f4033fd424801b8e548592f20063f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/46334b42977a48848d150c5219345a12] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp, totalSize=37.0 K 2024-11-20T19:26:00,024 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2193499af78c455b8caf838d0f9a4dba, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=417, earliestPutTs=1732130756456 2024-11-20T19:26:00,025 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 542f4033fd424801b8e548592f20063f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=440, earliestPutTs=1732130757385 2024-11-20T19:26:00,025 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46334b42977a48848d150c5219345a12, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=458, earliestPutTs=1732130758610 2024-11-20T19:26:00,034 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130820031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:00,034 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130820031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:00,039 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130820033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:00,044 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 550c6d15b8cc28d8b0f43501c9366c37#C#compaction#198 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:00,045 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/11a55c93e6c646c6ab8517acc5bb0107 is 50, key is test_row_0/C:col10/1732130759225/Put/seqid=0 2024-11-20T19:26:00,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742072_1248 (size=13357) 2024-11-20T19:26:00,085 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/11a55c93e6c646c6ab8517acc5bb0107 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/11a55c93e6c646c6ab8517acc5bb0107 2024-11-20T19:26:00,087 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:00,088 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T19:26:00,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
2024-11-20T19:26:00,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:26:00,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:26:00,088 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:00,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:00,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:00,093 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 550c6d15b8cc28d8b0f43501c9366c37/C of 550c6d15b8cc28d8b0f43501c9366c37 into 11a55c93e6c646c6ab8517acc5bb0107(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:00,093 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:26:00,093 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., storeName=550c6d15b8cc28d8b0f43501c9366c37/C, priority=13, startTime=1732130759880; duration=0sec 2024-11-20T19:26:00,093 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:00,093 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 550c6d15b8cc28d8b0f43501c9366c37:C 2024-11-20T19:26:00,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T19:26:00,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130820235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:00,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130820235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:00,240 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:00,241 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T19:26:00,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:26:00,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:26:00,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:26:00,242 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:00,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:00,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:00,243 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130820243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:00,324 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/01868bb07f5d4013afd18e35abd0fa54 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/01868bb07f5d4013afd18e35abd0fa54 2024-11-20T19:26:00,341 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 550c6d15b8cc28d8b0f43501c9366c37/B of 550c6d15b8cc28d8b0f43501c9366c37 into 01868bb07f5d4013afd18e35abd0fa54(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:00,341 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:26:00,341 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37., storeName=550c6d15b8cc28d8b0f43501c9366c37/B, priority=13, startTime=1732130759880; duration=0sec 2024-11-20T19:26:00,341 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:00,341 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 550c6d15b8cc28d8b0f43501c9366c37:B 2024-11-20T19:26:00,380 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:00,385 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205fb3c2bcba6c452b911305eb20f9e2c2_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205fb3c2bcba6c452b911305eb20f9e2c2_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:00,386 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/09d9971105404d17adf27a4eb5d87780, store: [table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:26:00,387 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/09d9971105404d17adf27a4eb5d87780 is 175, key is test_row_0/A:col10/1732130759900/Put/seqid=0 2024-11-20T19:26:00,395 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:00,395 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T19:26:00,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:26:00,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:26:00,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
2024-11-20T19:26:00,396 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:00,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:00,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:00,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742073_1249 (size=48639) 2024-11-20T19:26:00,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130820538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:00,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130820540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:00,548 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:00,548 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,548 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T19:26:00,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130820546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:00,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:26:00,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:26:00,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:26:00,548 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:00,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:00,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:00,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T19:26:00,609 DEBUG [Thread-611 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x367f47f7 to 127.0.0.1:49985 2024-11-20T19:26:00,609 DEBUG [Thread-611 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:00,612 DEBUG [Thread-613 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x247c0c93 to 127.0.0.1:49985 2024-11-20T19:26:00,612 DEBUG [Thread-613 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:00,613 DEBUG [Thread-615 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x517ff977 to 127.0.0.1:49985 2024-11-20T19:26:00,613 DEBUG [Thread-615 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:00,629 DEBUG [Thread-617 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3448d233 to 127.0.0.1:49985 2024-11-20T19:26:00,629 DEBUG [Thread-617 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:00,700 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:00,701 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T19:26:00,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:26:00,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:26:00,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:26:00,701 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:00,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:00,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:00,826 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=481, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/09d9971105404d17adf27a4eb5d87780 2024-11-20T19:26:00,834 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/2d929c4e43da4f51af0205aeba6afe8f is 50, key is test_row_0/B:col10/1732130759900/Put/seqid=0 2024-11-20T19:26:00,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742074_1250 (size=12301) 2024-11-20T19:26:00,838 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=481 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/2d929c4e43da4f51af0205aeba6afe8f 2024-11-20T19:26:00,844 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/95925e8f55f640ed94257172477da0c7 is 50, key is test_row_0/C:col10/1732130759900/Put/seqid=0 2024-11-20T19:26:00,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742075_1251 (size=12301) 2024-11-20T19:26:00,853 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:00,853 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T19:26:00,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:26:00,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
as already flushing 2024-11-20T19:26:00,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:26:00,854 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:00,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:00,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:01,005 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:01,006 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T19:26:01,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:26:01,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:26:01,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:26:01,006 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:01,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:01,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:01,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:01,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37554 deadline: 1732130821049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:01,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:01,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37590 deadline: 1732130821050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:01,052 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:01,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37578 deadline: 1732130821052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:01,158 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:01,158 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T19:26:01,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:26:01,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. as already flushing 2024-11-20T19:26:01,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:26:01,159 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:01,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:01,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:01,249 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=481 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/95925e8f55f640ed94257172477da0c7 2024-11-20T19:26:01,253 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/09d9971105404d17adf27a4eb5d87780 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/09d9971105404d17adf27a4eb5d87780 2024-11-20T19:26:01,257 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/09d9971105404d17adf27a4eb5d87780, entries=250, sequenceid=481, filesize=47.5 K 2024-11-20T19:26:01,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/2d929c4e43da4f51af0205aeba6afe8f as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/2d929c4e43da4f51af0205aeba6afe8f 2024-11-20T19:26:01,261 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/2d929c4e43da4f51af0205aeba6afe8f, entries=150, sequenceid=481, filesize=12.0 K 2024-11-20T19:26:01,261 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/95925e8f55f640ed94257172477da0c7 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/95925e8f55f640ed94257172477da0c7 2024-11-20T19:26:01,265 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/95925e8f55f640ed94257172477da0c7, entries=150, sequenceid=481, filesize=12.0 K 2024-11-20T19:26:01,266 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 550c6d15b8cc28d8b0f43501c9366c37 in 1363ms, sequenceid=481, compaction requested=false 2024-11-20T19:26:01,266 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:26:01,311 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:01,311 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T19:26:01,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:26:01,312 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2837): Flushing 550c6d15b8cc28d8b0f43501c9366c37 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T19:26:01,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=A 2024-11-20T19:26:01,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:01,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=B 2024-11-20T19:26:01,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:01,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=C 2024-11-20T19:26:01,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:01,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cc2da17724ad4a3ea44a748cc3f016c0_550c6d15b8cc28d8b0f43501c9366c37 is 50, key is test_row_0/A:col10/1732130759922/Put/seqid=0 2024-11-20T19:26:01,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742076_1252 (size=12454) 2024-11-20T19:26:01,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T19:26:01,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
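The 512.0 K figure quoted in the RegionTooBusyException entries is the per-region memstore blocking threshold, which (assuming the usual flush-size times block-multiplier relationship) this test keeps deliberately small so that writers hit it quickly. A hypothetical configuration sketch consistent with that limit; the concrete values are assumptions, not read from the test itself:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush each region's memstore after ~128 KB of edits (assumed value; the default is much larger).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // Block new updates once the memstore reaches multiplier * flush size,
    // i.e. 4 * 128 KB = 512 KB, matching the "Over memstore limit=512.0 K" seen in the log.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println(conf.get("hbase.hregion.memstore.flush.size"));
  }
}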
2024-11-20T19:26:01,759 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cc2da17724ad4a3ea44a748cc3f016c0_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cc2da17724ad4a3ea44a748cc3f016c0_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:01,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/acb19ef338aa4835bde22ba9f9fecde0, store: [table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:26:01,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/acb19ef338aa4835bde22ba9f9fecde0 is 175, key is test_row_0/A:col10/1732130759922/Put/seqid=0 2024-11-20T19:26:01,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742077_1253 (size=31255) 2024-11-20T19:26:02,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:02,053 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
as already flushing 2024-11-20T19:26:02,053 DEBUG [Thread-604 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x176c5c1b to 127.0.0.1:49985 2024-11-20T19:26:02,053 DEBUG [Thread-604 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:02,056 DEBUG [Thread-608 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x46114993 to 127.0.0.1:49985 2024-11-20T19:26:02,056 DEBUG [Thread-608 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:02,064 DEBUG [Thread-600 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1c826820 to 127.0.0.1:49985 2024-11-20T19:26:02,065 DEBUG [Thread-600 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:02,171 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=497, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/acb19ef338aa4835bde22ba9f9fecde0 2024-11-20T19:26:02,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/44c5f0f1bccc419ab47fa26b2db53017 is 50, key is test_row_0/B:col10/1732130759922/Put/seqid=0 2024-11-20T19:26:02,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742078_1254 (size=12301) 2024-11-20T19:26:02,589 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=497 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/44c5f0f1bccc419ab47fa26b2db53017 2024-11-20T19:26:02,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/56b9a8f66c3e46a0bc66add520f3a057 is 50, key is test_row_0/C:col10/1732130759922/Put/seqid=0 2024-11-20T19:26:02,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742079_1255 (size=12301) 2024-11-20T19:26:02,935 DEBUG [Thread-602 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2931c73e to 127.0.0.1:49985 2024-11-20T19:26:02,935 DEBUG [Thread-602 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:03,006 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=497 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/56b9a8f66c3e46a0bc66add520f3a057 2024-11-20T19:26:03,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/acb19ef338aa4835bde22ba9f9fecde0 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/acb19ef338aa4835bde22ba9f9fecde0 2024-11-20T19:26:03,020 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/acb19ef338aa4835bde22ba9f9fecde0, entries=150, sequenceid=497, filesize=30.5 K 2024-11-20T19:26:03,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/44c5f0f1bccc419ab47fa26b2db53017 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/44c5f0f1bccc419ab47fa26b2db53017 2024-11-20T19:26:03,026 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/44c5f0f1bccc419ab47fa26b2db53017, entries=150, sequenceid=497, filesize=12.0 K 2024-11-20T19:26:03,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/56b9a8f66c3e46a0bc66add520f3a057 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/56b9a8f66c3e46a0bc66add520f3a057 2024-11-20T19:26:03,031 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/56b9a8f66c3e46a0bc66add520f3a057, entries=150, sequenceid=497, filesize=12.0 K 2024-11-20T19:26:03,032 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=26.84 KB/27480 for 550c6d15b8cc28d8b0f43501c9366c37 in 1720ms, sequenceid=497, compaction requested=true 2024-11-20T19:26:03,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:26:03,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 
2024-11-20T19:26:03,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62 2024-11-20T19:26:03,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=62 2024-11-20T19:26:03,034 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-11-20T19:26:03,034 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.5640 sec 2024-11-20T19:26:03,035 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 3.5690 sec 2024-11-20T19:26:03,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T19:26:03,598 INFO [Thread-610 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-11-20T19:26:04,642 DEBUG [master/db9c3a6c6492:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 26eb6e9aec5a60a946cc3400b187b0a4 changed from -1.0 to 0.0, refreshing cache 2024-11-20T19:26:09,679 DEBUG [Thread-606 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x190853fc to 127.0.0.1:49985 2024-11-20T19:26:09,680 DEBUG [Thread-606 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:09,680 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-20T19:26:09,680 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 55 2024-11-20T19:26:09,680 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 91 2024-11-20T19:26:09,680 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 84 2024-11-20T19:26:09,680 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 54 2024-11-20T19:26:09,680 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 112 2024-11-20T19:26:09,680 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T19:26:09,680 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2868 2024-11-20T19:26:09,680 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2806 2024-11-20T19:26:09,680 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T19:26:09,680 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1296 2024-11-20T19:26:09,680 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3888 rows 2024-11-20T19:26:09,680 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1306 2024-11-20T19:26:09,680 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3918 rows 2024-11-20T19:26:09,680 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T19:26:09,680 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4e560c7b to 127.0.0.1:49985 2024-11-20T19:26:09,680 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:09,683 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 
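The tool summary above counts rows that readers and scanners "verified". A hypothetical reader in that spirit (not the tool's actual code): it fetches one row and checks that every returned cell carries the same value, which is the row-level atomicity property the test is named for.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RowAtomicityCheck {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Result result = table.get(new Get(Bytes.toBytes("test_row_0")));
      byte[] expected = null;
      for (Cell cell : result.rawCells()) {
        byte[] value = CellUtil.cloneValue(cell);
        if (expected == null) {
          expected = value;
        } else if (!Bytes.equals(expected, value)) {
          // A writer updates all families of a row in one batch, so a reader
          // should never observe two different values within the same row.
          throw new IllegalStateException("Row is not internally consistent");
        }
      }
    }
  }
}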
2024-11-20T19:26:09,683 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T19:26:09,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:09,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T19:26:09,689 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130769689"}]},"ts":"1732130769689"} 2024-11-20T19:26:09,690 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T19:26:09,705 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T19:26:09,706 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T19:26:09,707 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=550c6d15b8cc28d8b0f43501c9366c37, UNASSIGN}] 2024-11-20T19:26:09,708 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=550c6d15b8cc28d8b0f43501c9366c37, UNASSIGN 2024-11-20T19:26:09,708 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=550c6d15b8cc28d8b0f43501c9366c37, regionState=CLOSING, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:09,709 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T19:26:09,709 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; CloseRegionProcedure 550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496}] 2024-11-20T19:26:09,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T19:26:09,870 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:09,871 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(124): Close 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:09,871 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T19:26:09,871 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1681): Closing 550c6d15b8cc28d8b0f43501c9366c37, disabling compactions & flushes 2024-11-20T19:26:09,871 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1703): Closing region 
TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:26:09,871 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:26:09,871 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. after waiting 0 ms 2024-11-20T19:26:09,871 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:26:09,871 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(2837): Flushing 550c6d15b8cc28d8b0f43501c9366c37 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T19:26:09,871 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=A 2024-11-20T19:26:09,871 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:09,871 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=B 2024-11-20T19:26:09,871 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:09,871 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 550c6d15b8cc28d8b0f43501c9366c37, store=C 2024-11-20T19:26:09,872 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:09,878 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cd47aec2fd2e46b39fa6e8f5ca379499_550c6d15b8cc28d8b0f43501c9366c37 is 50, key is test_row_0/A:col10/1732130762055/Put/seqid=0 2024-11-20T19:26:09,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742080_1256 (size=12454) 2024-11-20T19:26:09,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T19:26:10,288 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:10,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 
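The master-side sequence recorded here (FlushTableProcedure pid=61 completing, then DisableTableProcedure pid=63 unassigning and closing the region) corresponds to two Admin calls from the client; a minimal sketch using the standard Admin API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushThenDisable {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a flush procedure on the master and waits for it to finish,
      // which is what produces the repeated "Checking to see if procedure is done" polling above.
      admin.flush(table);
      // Starts the disable procedure; regions are unassigned and closed,
      // flushing any remaining memstore data on close.
      admin.disableTable(table);
    }
  }
}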
2024-11-20T19:26:10,292 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cd47aec2fd2e46b39fa6e8f5ca379499_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cd47aec2fd2e46b39fa6e8f5ca379499_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:10,293 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/8c6ccea5c4a9466987a77bcf5c75f36d, store: [table=TestAcidGuarantees family=A region=550c6d15b8cc28d8b0f43501c9366c37] 2024-11-20T19:26:10,294 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/8c6ccea5c4a9466987a77bcf5c75f36d is 175, key is test_row_0/A:col10/1732130762055/Put/seqid=0 2024-11-20T19:26:10,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742081_1257 (size=31255) 2024-11-20T19:26:10,298 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=505, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/8c6ccea5c4a9466987a77bcf5c75f36d 2024-11-20T19:26:10,305 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/32ea247b59f343bfa8fdd5545d64c274 is 50, key is test_row_0/B:col10/1732130762055/Put/seqid=0 2024-11-20T19:26:10,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742082_1258 (size=12301) 2024-11-20T19:26:10,713 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=505 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/32ea247b59f343bfa8fdd5545d64c274 2024-11-20T19:26:10,720 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/f4d0fc125e794b5ca951febaa59737e4 is 50, key is test_row_0/C:col10/1732130762055/Put/seqid=0 
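Column family A here is MOB-enabled: DefaultMobStoreFlusher writes the large cell values into a MOB file under mobdir/.tmp, renames it into mobdir/data (the HMobStore "FLUSH Renaming" entry above), and then writes an ordinary store file with reference cells under the region's .tmp/A directory before it is committed. As a rough illustration of how such a family is typically declared with the HBase 2.x descriptor builders (names echo the log; the 100-byte MOB threshold is an assumption for this sketch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Family "A" stores values above the threshold in separate MOB files,
          // which is why the flush above also touches mobdir/.
          admin.createTable(
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                      .setMobEnabled(true)
                      .setMobThreshold(100L) // assumed threshold, in bytes
                      .build())
                  .build());
        }
      }
    }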
2024-11-20T19:26:10,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742083_1259 (size=12301) 2024-11-20T19:26:10,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T19:26:11,132 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=505 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/f4d0fc125e794b5ca951febaa59737e4 2024-11-20T19:26:11,137 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/A/8c6ccea5c4a9466987a77bcf5c75f36d as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/8c6ccea5c4a9466987a77bcf5c75f36d 2024-11-20T19:26:11,140 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/8c6ccea5c4a9466987a77bcf5c75f36d, entries=150, sequenceid=505, filesize=30.5 K 2024-11-20T19:26:11,142 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/B/32ea247b59f343bfa8fdd5545d64c274 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/32ea247b59f343bfa8fdd5545d64c274 2024-11-20T19:26:11,146 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/32ea247b59f343bfa8fdd5545d64c274, entries=150, sequenceid=505, filesize=12.0 K 2024-11-20T19:26:11,147 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/.tmp/C/f4d0fc125e794b5ca951febaa59737e4 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/f4d0fc125e794b5ca951febaa59737e4 2024-11-20T19:26:11,151 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/f4d0fc125e794b5ca951febaa59737e4, entries=150, sequenceid=505, 
filesize=12.0 K 2024-11-20T19:26:11,152 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 550c6d15b8cc28d8b0f43501c9366c37 in 1281ms, sequenceid=505, compaction requested=true 2024-11-20T19:26:11,152 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/5d093c1eef174910b977a6c15abd3cd0, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/49b85e0e5b0546ef92f23f93e4b08d91, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/01d98db437944a37b30ed0557f306a67, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/dc898803156e497f83539a409f4d4a8f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/a7c9fa42226d4c68a9163bf79da6a0d3, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/2aef246337d042709b36993f07ddba1c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/47d19ad24c824e92ad681319632f7d04, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/7645bcf826004b19bd2ab4eafd4ec161, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/920060d388b24b6895e745cc764c1933, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/ebc606cd9a904f268d193a1a936313fe, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/e3bd698e5c35478ab71a5455c3015426, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/7fac6ec9d6b94c7d8537a85b6f75824b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/5777e0148be04c01a9725d939063b8d2, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/97d5398e0f5b4cc3a479d2ad0238a65d, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/27b186765d5b41918071fe0d72db837c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/cf1b65dea18d41de8fbd39bc8baf9ae4, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/0451408ccb41448d8ab0ac03e308672e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/bb3b81c9d3364ba7b61efbdaafbf86f7, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/4b22b0aeeed44cea8305c86a77f1a8fa, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/83c032181fef45f592235b592983b92e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/d1f2bfb0ab9c4e3f962ae4c91d1095d8, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/ae74f069859642af922619467a43b867, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/d8e6ba54f93d4d41ae1d714b5a17d3a6, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/03e5bd1f2f8c40be9d85e34ef8307a39, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/4c8b94b464554bb08ec32c97932e9374, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/d1ec3699f38f4b61b22c57507009704f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/484704d11c5545bda6a8ee237b0daff5, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/e06bb9c6af7f4bc4b62c5299f8638718, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/1e4ab25e9c93490db014ea3819bd1cf6, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/c5a0bde1e9e04f81ace78c00a36086c1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/1000095174d64a43b23b8f10bf5c232e] to archive 2024-11-20T19:26:11,154 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
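The StoreCloser then archives the compacted-away store files of family A: each file is moved from data/default/TestAcidGuarantees/.../A to the mirrored path under archive/ rather than deleted outright, so anything that may still reference it (snapshots, for example) keeps working until the cleaner chores remove it. At its core this is a FileSystem rename into the archive directory; the following is a minimal sketch of that idea, with hypothetical placeholder paths, and without the retry and conflict handling the real HFileArchiver adds.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchiveStoreFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // Hypothetical source and destination; the archive path mirrors the data path.
        Path src = new Path("/hbase/data/default/TestAcidGuarantees/REGION/A/STOREFILE");
        Path dst = new Path("/hbase/archive/data/default/TestAcidGuarantees/REGION/A/STOREFILE");
        fs.mkdirs(dst.getParent());            // make sure the archive directory exists
        if (!fs.rename(src, dst)) {            // in HDFS this is a metadata-only move
          throw new java.io.IOException("could not archive " + src + " to " + dst);
        }
      }
    }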
2024-11-20T19:26:11,156 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/5d093c1eef174910b977a6c15abd3cd0 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/5d093c1eef174910b977a6c15abd3cd0 2024-11-20T19:26:11,158 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/49b85e0e5b0546ef92f23f93e4b08d91 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/49b85e0e5b0546ef92f23f93e4b08d91 2024-11-20T19:26:11,159 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/01d98db437944a37b30ed0557f306a67 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/01d98db437944a37b30ed0557f306a67 2024-11-20T19:26:11,161 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/dc898803156e497f83539a409f4d4a8f to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/dc898803156e497f83539a409f4d4a8f 2024-11-20T19:26:11,163 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/a7c9fa42226d4c68a9163bf79da6a0d3 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/a7c9fa42226d4c68a9163bf79da6a0d3 2024-11-20T19:26:11,164 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/2aef246337d042709b36993f07ddba1c to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/2aef246337d042709b36993f07ddba1c 2024-11-20T19:26:11,166 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/47d19ad24c824e92ad681319632f7d04 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/47d19ad24c824e92ad681319632f7d04 2024-11-20T19:26:11,167 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/7645bcf826004b19bd2ab4eafd4ec161 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/7645bcf826004b19bd2ab4eafd4ec161 2024-11-20T19:26:11,169 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/920060d388b24b6895e745cc764c1933 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/920060d388b24b6895e745cc764c1933 2024-11-20T19:26:11,170 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/ebc606cd9a904f268d193a1a936313fe to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/ebc606cd9a904f268d193a1a936313fe 2024-11-20T19:26:11,172 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/e3bd698e5c35478ab71a5455c3015426 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/e3bd698e5c35478ab71a5455c3015426 2024-11-20T19:26:11,173 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/7fac6ec9d6b94c7d8537a85b6f75824b to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/7fac6ec9d6b94c7d8537a85b6f75824b 2024-11-20T19:26:11,175 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/5777e0148be04c01a9725d939063b8d2 to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/5777e0148be04c01a9725d939063b8d2 2024-11-20T19:26:11,176 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/97d5398e0f5b4cc3a479d2ad0238a65d to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/97d5398e0f5b4cc3a479d2ad0238a65d 2024-11-20T19:26:11,178 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/27b186765d5b41918071fe0d72db837c to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/27b186765d5b41918071fe0d72db837c 2024-11-20T19:26:11,179 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/cf1b65dea18d41de8fbd39bc8baf9ae4 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/cf1b65dea18d41de8fbd39bc8baf9ae4 2024-11-20T19:26:11,181 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/0451408ccb41448d8ab0ac03e308672e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/0451408ccb41448d8ab0ac03e308672e 2024-11-20T19:26:11,182 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/bb3b81c9d3364ba7b61efbdaafbf86f7 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/bb3b81c9d3364ba7b61efbdaafbf86f7 2024-11-20T19:26:11,184 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/4b22b0aeeed44cea8305c86a77f1a8fa to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/4b22b0aeeed44cea8305c86a77f1a8fa 2024-11-20T19:26:11,185 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/83c032181fef45f592235b592983b92e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/83c032181fef45f592235b592983b92e 2024-11-20T19:26:11,186 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/d1f2bfb0ab9c4e3f962ae4c91d1095d8 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/d1f2bfb0ab9c4e3f962ae4c91d1095d8 2024-11-20T19:26:11,189 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/ae74f069859642af922619467a43b867 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/ae74f069859642af922619467a43b867 2024-11-20T19:26:11,190 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/d8e6ba54f93d4d41ae1d714b5a17d3a6 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/d8e6ba54f93d4d41ae1d714b5a17d3a6 2024-11-20T19:26:11,192 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/03e5bd1f2f8c40be9d85e34ef8307a39 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/03e5bd1f2f8c40be9d85e34ef8307a39 2024-11-20T19:26:11,194 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/4c8b94b464554bb08ec32c97932e9374 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/4c8b94b464554bb08ec32c97932e9374 2024-11-20T19:26:11,195 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/d1ec3699f38f4b61b22c57507009704f to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/d1ec3699f38f4b61b22c57507009704f 2024-11-20T19:26:11,197 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/484704d11c5545bda6a8ee237b0daff5 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/484704d11c5545bda6a8ee237b0daff5 2024-11-20T19:26:11,198 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/e06bb9c6af7f4bc4b62c5299f8638718 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/e06bb9c6af7f4bc4b62c5299f8638718 2024-11-20T19:26:11,199 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/1e4ab25e9c93490db014ea3819bd1cf6 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/1e4ab25e9c93490db014ea3819bd1cf6 2024-11-20T19:26:11,201 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/c5a0bde1e9e04f81ace78c00a36086c1 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/c5a0bde1e9e04f81ace78c00a36086c1 2024-11-20T19:26:11,204 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/1000095174d64a43b23b8f10bf5c232e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/1000095174d64a43b23b8f10bf5c232e 2024-11-20T19:26:11,206 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/61e5c3384e2c4311b10201aeb373fae1, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/846829718ab44cf7a862b39d8e338902, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/5318d971096c4d5695b447802adeb277, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/8800a7ee82b24d27b0652ef87a0cf764, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/78a66d65bf424f7d8de284f0c93b285d, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/e8aba093c5ac42f2946fdcc386a9b797, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/c8fa962569904e9092e0825ae124b76a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/5e8a9c436db34b498f3a481a6b68ecd3, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/9bfdfd35d3034e56981f19eb967c8d20, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/a2e41f52e23e4255b2314d8d794f2463, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/69e78ab4ed2f4f14bf5f47306c1212fc, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/580309ddd5eb44919831685163871098, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/a88f3f3fd572402fb2360eacac1afdf0, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/fb76522c7d3c403280037f2d0de407e3, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/d25c244a576e495bbeb1c5b90f039e09, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/6dd0efb953a8482cb989e98e5a256217, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/894be7a2b8da473da2622bcc0ba15d71, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/8dbb37167a5d44cd9a8ed4863352d939, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/995033e7637c422ebfa776bb7db9f333, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/375c25c6fc864e2a854f7207944042f6, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/213595b1fb104bec84795735db6b275a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/95bc29caab574e738b966b8f0c31d2bd, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/326569ca34b54de3804111c3ad3bc73e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/3cc1cafb98cd4394ad2a4ac9aff510a2, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/25080712d4fa4dfe9ce55d378ac0844f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/4978dbd429f542148578fcd054482c68, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/a6ad61ec84d344e8998ae3d3a265b345, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/66abe1b5857d4846a8cc244b8f9d83c0, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/3bcef55ecae040ac860a02c171bb5f02, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/5f44807f7da04a9ba8e6e6268c2cbdf0, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/7ad6833f24f04a838623a03ad400487b] to archive 2024-11-20T19:26:11,207 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T19:26:11,209 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/61e5c3384e2c4311b10201aeb373fae1 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/61e5c3384e2c4311b10201aeb373fae1 2024-11-20T19:26:11,211 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/846829718ab44cf7a862b39d8e338902 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/846829718ab44cf7a862b39d8e338902 2024-11-20T19:26:11,212 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/5318d971096c4d5695b447802adeb277 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/5318d971096c4d5695b447802adeb277 2024-11-20T19:26:11,213 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/8800a7ee82b24d27b0652ef87a0cf764 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/8800a7ee82b24d27b0652ef87a0cf764 2024-11-20T19:26:11,215 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/78a66d65bf424f7d8de284f0c93b285d to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/78a66d65bf424f7d8de284f0c93b285d 2024-11-20T19:26:11,216 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/e8aba093c5ac42f2946fdcc386a9b797 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/e8aba093c5ac42f2946fdcc386a9b797 2024-11-20T19:26:11,217 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/c8fa962569904e9092e0825ae124b76a to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/c8fa962569904e9092e0825ae124b76a 2024-11-20T19:26:11,219 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/5e8a9c436db34b498f3a481a6b68ecd3 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/5e8a9c436db34b498f3a481a6b68ecd3 2024-11-20T19:26:11,220 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/9bfdfd35d3034e56981f19eb967c8d20 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/9bfdfd35d3034e56981f19eb967c8d20 2024-11-20T19:26:11,222 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/a2e41f52e23e4255b2314d8d794f2463 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/a2e41f52e23e4255b2314d8d794f2463 2024-11-20T19:26:11,223 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/69e78ab4ed2f4f14bf5f47306c1212fc to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/69e78ab4ed2f4f14bf5f47306c1212fc 2024-11-20T19:26:11,224 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/580309ddd5eb44919831685163871098 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/580309ddd5eb44919831685163871098 2024-11-20T19:26:11,226 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/a88f3f3fd572402fb2360eacac1afdf0 to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/a88f3f3fd572402fb2360eacac1afdf0 2024-11-20T19:26:11,227 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/fb76522c7d3c403280037f2d0de407e3 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/fb76522c7d3c403280037f2d0de407e3 2024-11-20T19:26:11,229 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/d25c244a576e495bbeb1c5b90f039e09 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/d25c244a576e495bbeb1c5b90f039e09 2024-11-20T19:26:11,230 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/6dd0efb953a8482cb989e98e5a256217 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/6dd0efb953a8482cb989e98e5a256217 2024-11-20T19:26:11,232 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/894be7a2b8da473da2622bcc0ba15d71 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/894be7a2b8da473da2622bcc0ba15d71 2024-11-20T19:26:11,233 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/8dbb37167a5d44cd9a8ed4863352d939 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/8dbb37167a5d44cd9a8ed4863352d939 2024-11-20T19:26:11,235 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/995033e7637c422ebfa776bb7db9f333 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/995033e7637c422ebfa776bb7db9f333 2024-11-20T19:26:11,236 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/375c25c6fc864e2a854f7207944042f6 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/375c25c6fc864e2a854f7207944042f6 2024-11-20T19:26:11,238 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/213595b1fb104bec84795735db6b275a to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/213595b1fb104bec84795735db6b275a 2024-11-20T19:26:11,239 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/95bc29caab574e738b966b8f0c31d2bd to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/95bc29caab574e738b966b8f0c31d2bd 2024-11-20T19:26:11,241 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/326569ca34b54de3804111c3ad3bc73e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/326569ca34b54de3804111c3ad3bc73e 2024-11-20T19:26:11,242 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/3cc1cafb98cd4394ad2a4ac9aff510a2 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/3cc1cafb98cd4394ad2a4ac9aff510a2 2024-11-20T19:26:11,244 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/25080712d4fa4dfe9ce55d378ac0844f to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/25080712d4fa4dfe9ce55d378ac0844f 2024-11-20T19:26:11,245 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/4978dbd429f542148578fcd054482c68 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/4978dbd429f542148578fcd054482c68 2024-11-20T19:26:11,247 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/a6ad61ec84d344e8998ae3d3a265b345 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/a6ad61ec84d344e8998ae3d3a265b345 2024-11-20T19:26:11,248 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/66abe1b5857d4846a8cc244b8f9d83c0 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/66abe1b5857d4846a8cc244b8f9d83c0 2024-11-20T19:26:11,250 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/3bcef55ecae040ac860a02c171bb5f02 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/3bcef55ecae040ac860a02c171bb5f02 2024-11-20T19:26:11,251 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/5f44807f7da04a9ba8e6e6268c2cbdf0 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/5f44807f7da04a9ba8e6e6268c2cbdf0 2024-11-20T19:26:11,252 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/7ad6833f24f04a838623a03ad400487b to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/7ad6833f24f04a838623a03ad400487b 2024-11-20T19:26:11,259 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/a394d14ae34548d69b1a7ff7ecee0a15, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/bd6d7d8da3294057831b11b202c522da, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/ebc251d35b714cc4b6949cf5d37d66e8, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/8ae3efd97e704d05ad00b7009d6114f2, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/118831bfd2754c93b2c25a877cdbce06, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/0b72f2eb6ed845049e381c31bfc167cb, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/e094a78938ac4c11af0ac8f066c4b4ee, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/8f7683f20c39475681998015b40a5164, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/971424db5efc4aa6b7a2c8e7b733ebcd, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/a56647faf65c4cfdb5936e7c5a36ff55, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/137da047d3634fa291acd79f279c8c7f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/9b03773caa674adea8c4e584d6e2a6dd, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/222d9820455442f9951573bf6aba7b0f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/03f15fe528794fb5995507df463ea744, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/049fd35e086d474ca0ee1be475faafa9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/bf3a2c948e4c40b2a61679eeeecabcd9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/e613d47a44bb4378bd5333d5ed846fee, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/4cd5e74764474d78b4988fb254028dbf, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/964b38e2a7ef4298a546aebaffbbb377, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/f70669460e5148ab8a6bba6c8dc2007a, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/390149982a1d490e95b58a62d09565aa, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/62ed94897bab49b481058108b2fce75c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/0b00ab54557b4c1596f98c0c57829aca, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/7b3a8d75a2fb47f5b3507573c4efbaf7, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/f61340ea5b72467986a15e1444e814f6, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/6f5ef2555335497bb2cfec3a5b9ba389, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/c46ae3db97c14caf8c365e49006695ea, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/2193499af78c455b8caf838d0f9a4dba, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/fc4f4be882e24c4e8f451744e99ce501, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/542f4033fd424801b8e548592f20063f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/46334b42977a48848d150c5219345a12] to archive 2024-11-20T19:26:11,261 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T19:26:11,265 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/a394d14ae34548d69b1a7ff7ecee0a15 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/a394d14ae34548d69b1a7ff7ecee0a15 2024-11-20T19:26:11,268 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/bd6d7d8da3294057831b11b202c522da to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/bd6d7d8da3294057831b11b202c522da 2024-11-20T19:26:11,270 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/ebc251d35b714cc4b6949cf5d37d66e8 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/ebc251d35b714cc4b6949cf5d37d66e8 2024-11-20T19:26:11,273 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/8ae3efd97e704d05ad00b7009d6114f2 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/8ae3efd97e704d05ad00b7009d6114f2 2024-11-20T19:26:11,275 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/118831bfd2754c93b2c25a877cdbce06 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/118831bfd2754c93b2c25a877cdbce06 2024-11-20T19:26:11,277 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/0b72f2eb6ed845049e381c31bfc167cb to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/0b72f2eb6ed845049e381c31bfc167cb 2024-11-20T19:26:11,281 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/e094a78938ac4c11af0ac8f066c4b4ee to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/e094a78938ac4c11af0ac8f066c4b4ee 2024-11-20T19:26:11,283 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/8f7683f20c39475681998015b40a5164 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/8f7683f20c39475681998015b40a5164 2024-11-20T19:26:11,285 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/971424db5efc4aa6b7a2c8e7b733ebcd to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/971424db5efc4aa6b7a2c8e7b733ebcd 2024-11-20T19:26:11,286 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/a56647faf65c4cfdb5936e7c5a36ff55 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/a56647faf65c4cfdb5936e7c5a36ff55 2024-11-20T19:26:11,287 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/137da047d3634fa291acd79f279c8c7f to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/137da047d3634fa291acd79f279c8c7f 2024-11-20T19:26:11,288 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/9b03773caa674adea8c4e584d6e2a6dd to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/9b03773caa674adea8c4e584d6e2a6dd 2024-11-20T19:26:11,289 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/222d9820455442f9951573bf6aba7b0f to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/222d9820455442f9951573bf6aba7b0f 2024-11-20T19:26:11,290 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/03f15fe528794fb5995507df463ea744 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/03f15fe528794fb5995507df463ea744 2024-11-20T19:26:11,291 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/049fd35e086d474ca0ee1be475faafa9 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/049fd35e086d474ca0ee1be475faafa9 2024-11-20T19:26:11,292 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/bf3a2c948e4c40b2a61679eeeecabcd9 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/bf3a2c948e4c40b2a61679eeeecabcd9 2024-11-20T19:26:11,293 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/e613d47a44bb4378bd5333d5ed846fee to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/e613d47a44bb4378bd5333d5ed846fee 2024-11-20T19:26:11,294 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/4cd5e74764474d78b4988fb254028dbf to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/4cd5e74764474d78b4988fb254028dbf 2024-11-20T19:26:11,296 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/964b38e2a7ef4298a546aebaffbbb377 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/964b38e2a7ef4298a546aebaffbbb377 2024-11-20T19:26:11,297 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/f70669460e5148ab8a6bba6c8dc2007a to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/f70669460e5148ab8a6bba6c8dc2007a 2024-11-20T19:26:11,299 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/390149982a1d490e95b58a62d09565aa to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/390149982a1d490e95b58a62d09565aa 2024-11-20T19:26:11,301 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/62ed94897bab49b481058108b2fce75c to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/62ed94897bab49b481058108b2fce75c 2024-11-20T19:26:11,302 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/0b00ab54557b4c1596f98c0c57829aca to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/0b00ab54557b4c1596f98c0c57829aca 2024-11-20T19:26:11,304 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/7b3a8d75a2fb47f5b3507573c4efbaf7 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/7b3a8d75a2fb47f5b3507573c4efbaf7 2024-11-20T19:26:11,305 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/f61340ea5b72467986a15e1444e814f6 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/f61340ea5b72467986a15e1444e814f6 2024-11-20T19:26:11,306 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/6f5ef2555335497bb2cfec3a5b9ba389 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/6f5ef2555335497bb2cfec3a5b9ba389 2024-11-20T19:26:11,311 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/c46ae3db97c14caf8c365e49006695ea to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/c46ae3db97c14caf8c365e49006695ea 2024-11-20T19:26:11,313 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/2193499af78c455b8caf838d0f9a4dba to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/2193499af78c455b8caf838d0f9a4dba 2024-11-20T19:26:11,314 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/fc4f4be882e24c4e8f451744e99ce501 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/fc4f4be882e24c4e8f451744e99ce501 2024-11-20T19:26:11,323 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/542f4033fd424801b8e548592f20063f to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/542f4033fd424801b8e548592f20063f 2024-11-20T19:26:11,325 DEBUG [StoreCloser-TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/46334b42977a48848d150c5219345a12 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/46334b42977a48848d150c5219345a12 2024-11-20T19:26:11,336 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/recovered.edits/508.seqid, newMaxSeqId=508, maxSeqId=4 2024-11-20T19:26:11,338 INFO 
[RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37. 2024-11-20T19:26:11,338 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1635): Region close journal for 550c6d15b8cc28d8b0f43501c9366c37: 2024-11-20T19:26:11,340 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(170): Closed 550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,340 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=550c6d15b8cc28d8b0f43501c9366c37, regionState=CLOSED 2024-11-20T19:26:11,345 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-11-20T19:26:11,345 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; CloseRegionProcedure 550c6d15b8cc28d8b0f43501c9366c37, server=db9c3a6c6492,41229,1732130701496 in 1.6330 sec 2024-11-20T19:26:11,348 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=65, resume processing ppid=64 2024-11-20T19:26:11,348 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=64, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=550c6d15b8cc28d8b0f43501c9366c37, UNASSIGN in 1.6380 sec 2024-11-20T19:26:11,350 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-11-20T19:26:11,350 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.6430 sec 2024-11-20T19:26:11,352 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130771351"}]},"ts":"1732130771351"} 2024-11-20T19:26:11,353 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T19:26:11,397 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T19:26:11,399 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.7140 sec 2024-11-20T19:26:11,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T19:26:11,791 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-11-20T19:26:11,791 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T19:26:11,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:11,793 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:11,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T19:26:11,793 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=67, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:11,794 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,801 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A, FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B, FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C, FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/recovered.edits] 2024-11-20T19:26:11,803 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/09d9971105404d17adf27a4eb5d87780 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/09d9971105404d17adf27a4eb5d87780 2024-11-20T19:26:11,805 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/8c6ccea5c4a9466987a77bcf5c75f36d to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/8c6ccea5c4a9466987a77bcf5c75f36d 2024-11-20T19:26:11,806 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/9b3715d8a7554d239a49b61d633d51a3 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/9b3715d8a7554d239a49b61d633d51a3 2024-11-20T19:26:11,807 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/acb19ef338aa4835bde22ba9f9fecde0 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/A/acb19ef338aa4835bde22ba9f9fecde0 2024-11-20T19:26:11,809 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/01868bb07f5d4013afd18e35abd0fa54 to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/01868bb07f5d4013afd18e35abd0fa54 2024-11-20T19:26:11,810 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/2d929c4e43da4f51af0205aeba6afe8f to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/2d929c4e43da4f51af0205aeba6afe8f 2024-11-20T19:26:11,811 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/32ea247b59f343bfa8fdd5545d64c274 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/32ea247b59f343bfa8fdd5545d64c274 2024-11-20T19:26:11,812 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/44c5f0f1bccc419ab47fa26b2db53017 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/B/44c5f0f1bccc419ab47fa26b2db53017 2024-11-20T19:26:11,814 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/11a55c93e6c646c6ab8517acc5bb0107 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/11a55c93e6c646c6ab8517acc5bb0107 2024-11-20T19:26:11,816 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/56b9a8f66c3e46a0bc66add520f3a057 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/56b9a8f66c3e46a0bc66add520f3a057 2024-11-20T19:26:11,817 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/95925e8f55f640ed94257172477da0c7 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/95925e8f55f640ed94257172477da0c7 2024-11-20T19:26:11,818 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/f4d0fc125e794b5ca951febaa59737e4 to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/C/f4d0fc125e794b5ca951febaa59737e4 2024-11-20T19:26:11,820 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/recovered.edits/508.seqid to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37/recovered.edits/508.seqid 2024-11-20T19:26:11,821 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,821 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T19:26:11,821 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T19:26:11,822 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-20T19:26:11,826 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112002e2e2c7d63141d08e8109148b81d09d_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112002e2e2c7d63141d08e8109148b81d09d_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,827 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201bf5b5e35873441685940a87e43de33c_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201bf5b5e35873441685940a87e43de33c_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,828 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202e99da3b04714e15828adc52713fc84e_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202e99da3b04714e15828adc52713fc84e_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,829 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112034229c6346724f5a81ecbd2b601d6f60_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112034229c6346724f5a81ecbd2b601d6f60_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,831 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203cc2d04ca8d64be6a5d0940535793822_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203cc2d04ca8d64be6a5d0940535793822_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,838 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112047c9effbeb114c66819a90711448f5fd_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112047c9effbeb114c66819a90711448f5fd_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,840 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112058b3c46461c44b34ba785ede0c1d100b_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112058b3c46461c44b34ba785ede0c1d100b_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,841 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205fb3c2bcba6c452b911305eb20f9e2c2_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205fb3c2bcba6c452b911305eb20f9e2c2_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,842 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112061dc8e6cec994d3f9eaa6b67b7af7a6e_550c6d15b8cc28d8b0f43501c9366c37 to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112061dc8e6cec994d3f9eaa6b67b7af7a6e_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,843 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207840e21fe2004b519cd7c1ca79a57dcf_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207840e21fe2004b519cd7c1ca79a57dcf_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,844 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a4dd3813b7e6427b9637d5ecd4d495c4_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a4dd3813b7e6427b9637d5ecd4d495c4_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,845 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a5666da050ba4793b83ec009e252dfec_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a5666da050ba4793b83ec009e252dfec_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,847 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a98cf7f46e874729a57c91e12ff5fbb7_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a98cf7f46e874729a57c91e12ff5fbb7_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,848 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ad6e4aa66360417197644ebce4df67ba_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ad6e4aa66360417197644ebce4df67ba_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,849 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b53df4d6ea1c4777b545f347ed53cc5e_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b53df4d6ea1c4777b545f347ed53cc5e_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,850 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bc2fbb70fad744baa0c774fe529f82cf_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bc2fbb70fad744baa0c774fe529f82cf_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,851 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bfeef67a2ade4dac8d63d4f37773b2ce_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bfeef67a2ade4dac8d63d4f37773b2ce_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,852 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cc2da17724ad4a3ea44a748cc3f016c0_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cc2da17724ad4a3ea44a748cc3f016c0_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,853 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cd47aec2fd2e46b39fa6e8f5ca379499_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cd47aec2fd2e46b39fa6e8f5ca379499_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,854 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d102f773b1ef4e64a71136feef47e17f_550c6d15b8cc28d8b0f43501c9366c37 to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d102f773b1ef4e64a71136feef47e17f_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,855 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d6863164e4364075b2752f8edabae8b8_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d6863164e4364075b2752f8edabae8b8_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,856 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e3b28d5a4ff74d5cbdeb06693cd4fd49_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e3b28d5a4ff74d5cbdeb06693cd4fd49_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,858 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e552cefd0f5043718e350266f94fb4d8_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e552cefd0f5043718e350266f94fb4d8_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,859 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ed9433c0c7a94d719def3df44842a865_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ed9433c0c7a94d719def3df44842a865_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,860 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f3ab5066dff84f5b8ff0a50d208865f8_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f3ab5066dff84f5b8ff0a50d208865f8_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,861 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fc5d5df803194d859e269484a33ae6a3_550c6d15b8cc28d8b0f43501c9366c37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fc5d5df803194d859e269484a33ae6a3_550c6d15b8cc28d8b0f43501c9366c37 2024-11-20T19:26:11,862 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T19:26:11,864 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=67, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:11,867 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T19:26:11,869 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T19:26:11,870 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=67, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:11,870 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T19:26:11,871 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732130771870"}]},"ts":"9223372036854775807"} 2024-11-20T19:26:11,873 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T19:26:11,873 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 550c6d15b8cc28d8b0f43501c9366c37, NAME => 'TestAcidGuarantees,,1732130737312.550c6d15b8cc28d8b0f43501c9366c37.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T19:26:11,873 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-20T19:26:11,873 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732130771873"}]},"ts":"9223372036854775807"} 2024-11-20T19:26:11,875 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T19:26:11,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T19:26:11,914 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=67, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:11,915 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 123 msec 2024-11-20T19:26:12,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T19:26:12,095 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-11-20T19:26:12,105 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=237 (was 238), OpenFileDescriptor=451 (was 457), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=647 (was 543) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3477 (was 3722) 2024-11-20T19:26:12,114 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=237, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=647, ProcessCount=11, AvailableMemoryMB=3477 2024-11-20T19:26:12,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-20T19:26:12,116 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T19:26:12,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:12,118 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T19:26:12,118 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:12,118 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 68 2024-11-20T19:26:12,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-20T19:26:12,118 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T19:26:12,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742084_1260 (size=960) 2024-11-20T19:26:12,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-20T19:26:12,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-20T19:26:12,526 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203 2024-11-20T19:26:12,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742085_1261 (size=53) 2024-11-20T19:26:12,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-20T19:26:12,932 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:26:12,932 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing eff3b3afe40ac2d93c9d770f2a159636, disabling compactions & flushes 2024-11-20T19:26:12,932 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:12,932 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:12,932 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. after waiting 0 ms 2024-11-20T19:26:12,932 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:12,932 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
2024-11-20T19:26:12,932 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:12,933 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T19:26:12,933 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732130772933"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732130772933"}]},"ts":"1732130772933"} 2024-11-20T19:26:12,934 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T19:26:12,935 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T19:26:12,935 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130772935"}]},"ts":"1732130772935"} 2024-11-20T19:26:12,935 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T19:26:12,980 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=eff3b3afe40ac2d93c9d770f2a159636, ASSIGN}] 2024-11-20T19:26:12,982 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=eff3b3afe40ac2d93c9d770f2a159636, ASSIGN 2024-11-20T19:26:12,983 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=eff3b3afe40ac2d93c9d770f2a159636, ASSIGN; state=OFFLINE, location=db9c3a6c6492,41229,1732130701496; forceNewPlan=false, retain=false 2024-11-20T19:26:13,133 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=eff3b3afe40ac2d93c9d770f2a159636, regionState=OPENING, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:13,134 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; OpenRegionProcedure eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496}] 2024-11-20T19:26:13,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-20T19:26:13,286 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:13,289 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
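Illustrative sketch (not part of the captured log): the ASSIGN subprocedure above ends with hbase:meta recording regionState=OPEN and regionLocation=db9c3a6c6492,41229,1732130701496 for the single region. From the client side that outcome can be observed with the standard RegionLocator/Admin calls; the snippet below is a hedged example of such a check, not something the test is known to do.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class ShowAssignment {
      public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin();
             RegionLocator locator = conn.getRegionLocator(name)) {
          // true once every region of the table has been assigned and opened
          System.out.println("available: " + admin.isTableAvailable(name));
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            // encoded region name plus the hosting region server, as recorded in hbase:meta
            System.out.println(loc.getRegion().getEncodedName() + " on " + loc.getServerName());
          }
        }
      }
    }
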
2024-11-20T19:26:13,289 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7285): Opening region: {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} 2024-11-20T19:26:13,289 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:13,289 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:26:13,289 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7327): checking encryption for eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:13,289 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7330): checking classloading for eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:13,291 INFO [StoreOpener-eff3b3afe40ac2d93c9d770f2a159636-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:13,292 INFO [StoreOpener-eff3b3afe40ac2d93c9d770f2a159636-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:26:13,292 INFO [StoreOpener-eff3b3afe40ac2d93c9d770f2a159636-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eff3b3afe40ac2d93c9d770f2a159636 columnFamilyName A 2024-11-20T19:26:13,292 DEBUG [StoreOpener-eff3b3afe40ac2d93c9d770f2a159636-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:13,292 INFO [StoreOpener-eff3b3afe40ac2d93c9d770f2a159636-1 {}] regionserver.HStore(327): Store=eff3b3afe40ac2d93c9d770f2a159636/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:26:13,293 INFO [StoreOpener-eff3b3afe40ac2d93c9d770f2a159636-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:13,293 INFO [StoreOpener-eff3b3afe40ac2d93c9d770f2a159636-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:26:13,294 INFO [StoreOpener-eff3b3afe40ac2d93c9d770f2a159636-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eff3b3afe40ac2d93c9d770f2a159636 columnFamilyName B 2024-11-20T19:26:13,294 DEBUG [StoreOpener-eff3b3afe40ac2d93c9d770f2a159636-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:13,294 INFO [StoreOpener-eff3b3afe40ac2d93c9d770f2a159636-1 {}] regionserver.HStore(327): Store=eff3b3afe40ac2d93c9d770f2a159636/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:26:13,294 INFO [StoreOpener-eff3b3afe40ac2d93c9d770f2a159636-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:13,295 INFO [StoreOpener-eff3b3afe40ac2d93c9d770f2a159636-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:26:13,295 INFO [StoreOpener-eff3b3afe40ac2d93c9d770f2a159636-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eff3b3afe40ac2d93c9d770f2a159636 columnFamilyName C 2024-11-20T19:26:13,295 DEBUG [StoreOpener-eff3b3afe40ac2d93c9d770f2a159636-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:13,296 INFO [StoreOpener-eff3b3afe40ac2d93c9d770f2a159636-1 {}] regionserver.HStore(327): Store=eff3b3afe40ac2d93c9d770f2a159636/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:26:13,296 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:13,296 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:13,296 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:13,298 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T19:26:13,299 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1085): writing seq id for eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:13,301 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T19:26:13,301 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1102): Opened eff3b3afe40ac2d93c9d770f2a159636; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59503845, jitterRate=-0.11332361400127411}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T19:26:13,302 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1001): Region open journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:13,302 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., pid=70, masterSystemTime=1732130773286 2024-11-20T19:26:13,304 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:13,304 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
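Illustrative sketch (not part of the captured log): once the region is open on db9c3a6c6492,41229, the test's client threads start writing; the flush output further down shows keys of the form test_row_0/A:col10, and the flush covers all three stores A, B and C of the same region. The writer loop of TestAcidGuarantees is not visible in this log, so the following is only a rough approximation of one such multi-family write (row, family and qualifier names copied from the log; the value is a placeholder).

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteOneRow {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          byte[] value = Bytes.toBytes("placeholder-value"); // real test values are not shown in the log
          // a single Put spanning all three families, matching the A/B/C stores flushed below
          for (String family : new String[] {"A", "B", "C"}) {
            put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), value);
          }
          table.put(put); // rejected with RegionTooBusyException while the memstore is over its limit
        }
      }
    }
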
2024-11-20T19:26:13,304 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=eff3b3afe40ac2d93c9d770f2a159636, regionState=OPEN, openSeqNum=2, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:13,305 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-11-20T19:26:13,306 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; OpenRegionProcedure eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 in 171 msec 2024-11-20T19:26:13,307 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=69, resume processing ppid=68 2024-11-20T19:26:13,307 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=eff3b3afe40ac2d93c9d770f2a159636, ASSIGN in 326 msec 2024-11-20T19:26:13,307 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T19:26:13,307 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130773307"}]},"ts":"1732130773307"} 2024-11-20T19:26:13,308 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T19:26:13,491 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T19:26:13,492 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.3750 sec 2024-11-20T19:26:14,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-20T19:26:14,223 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 68 completed 2024-11-20T19:26:14,225 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3b70f48f to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7f66057f 2024-11-20T19:26:14,264 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53bfce45, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:14,266 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:14,267 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57986, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:14,268 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T19:26:14,270 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35062, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T19:26:14,272 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x58341641 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@17b6adc5 2024-11-20T19:26:14,280 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a569490, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:14,283 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x44645c55 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@669e1999 2024-11-20T19:26:14,294 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6862e3ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:14,296 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x64ee0130 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72aa9ee5 2024-11-20T19:26:14,306 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d296fed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:14,307 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x683b64c3 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4ec09297 2024-11-20T19:26:14,331 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8d0caa5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:14,333 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x07e55eb7 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4dfb20f6 2024-11-20T19:26:14,345 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43f04e0e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:14,347 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17cf7fc0 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@560ec309 2024-11-20T19:26:14,364 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fef31f8, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:14,365 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x78b04266 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5886c0f2 2024-11-20T19:26:14,381 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@eb04aeb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:14,382 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x088aa519 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@66e575aa 2024-11-20T19:26:14,403 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a0e9c8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:14,404 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5e998dd3 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@131ceb8f 2024-11-20T19:26:14,414 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d68f787, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:14,415 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e4c79b8 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a78bf6d 2024-11-20T19:26:14,428 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10e6bf6a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:14,439 DEBUG [hconnection-0x3e228f6d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:14,440 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58002, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:14,443 DEBUG [hconnection-0x4b90a49e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:14,444 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58006, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:14,453 DEBUG [hconnection-0x7b7d4f2e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:14,454 INFO [RS-EventLoopGroup-3-3 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58008, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:14,457 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:14,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:14,457 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:26:14,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A 2024-11-20T19:26:14,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:14,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B 2024-11-20T19:26:14,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:14,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=C 2024-11-20T19:26:14,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-11-20T19:26:14,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:14,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T19:26:14,460 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:14,460 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:14,460 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:14,481 DEBUG [hconnection-0xa1815ef-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:14,482 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58014, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:14,487 DEBUG [hconnection-0x4220ef3d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:14,489 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58018, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:14,498 DEBUG 
[hconnection-0x688bfe27-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:14,500 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58026, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:14,506 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:14,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130834505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:14,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:14,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130834506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:14,508 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:14,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130834508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:14,511 DEBUG [hconnection-0x36f35b87-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:14,512 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58042, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:14,514 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:14,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130834514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:14,544 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/c9a7831a5dbf4eba8b6daa6f037f366d is 50, key is test_row_0/A:col10/1732130774455/Put/seqid=0 2024-11-20T19:26:14,545 DEBUG [hconnection-0x7490d2c0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:14,545 DEBUG [hconnection-0x68e5737b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:14,546 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58048, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:14,546 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58050, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:14,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T19:26:14,563 DEBUG [hconnection-0x6e29e51-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:14,564 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58066, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:14,582 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:14,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130834580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:14,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742086_1262 (size=12001) 2024-11-20T19:26:14,610 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:14,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130834608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:14,610 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:14,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130834609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:14,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:14,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130834609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:14,612 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:14,612 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T19:26:14,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:14,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:14,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:14,613 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:14,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:14,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:14,618 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:14,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130834616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:14,685 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:14,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130834684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:14,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T19:26:14,765 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:14,766 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T19:26:14,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:14,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:14,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:14,766 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
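Illustrative sketch (not part of the captured log): the recurring RegionTooBusyException: Over memstore limit=512.0 K entries mean the region's memstore has reached its blocking threshold (tuned down to 512 KB for this test run) before the in-flight flush has freed memory, so the server rejects further Mutate calls, and the parallel FlushRegionProcedure (pid=72) keeps failing with "NOT flushing ... as already flushing". A caller that sees this exception surface only needs to back off and retry once the flush completes; a minimal hand-rolled version of that retry, assuming the application rather than the HBase client handles it, might look like this:

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public final class BackoffPut {
      /** Retries a put with linear backoff while the region reports it is over its memstore limit. */
      static void putWithBackoff(Table table, Put put, int maxAttempts) throws Exception {
        for (int attempt = 1; ; attempt++) {
          try {
            table.put(put);
            return;
          } catch (RegionTooBusyException e) {      // "Over memstore limit=..." as in the log above
            if (attempt >= maxAttempts) {
              throw e;                              // still blocked after maxAttempts tries
            }
            Thread.sleep(100L * attempt);           // wait for the flush to make room, then retry
          }
        }
      }
    }
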
2024-11-20T19:26:14,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72
java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T19:26:14,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=72
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T19:26:14,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:14,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130834812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:14,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:14,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130834813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:14,815 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:14,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130834813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:14,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:14,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130834819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:14,888 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:14,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130834887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:14,918 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:14,918 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72
2024-11-20T19:26:14,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.
2024-11-20T19:26:14,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing
2024-11-20T19:26:14,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.
2024-11-20T19:26:14,919 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72
java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T19:26:14,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72
java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T19:26:14,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=72
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T19:26:14,989 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/c9a7831a5dbf4eba8b6daa6f037f366d
2024-11-20T19:26:15,037 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/1df6287842de4424b09fa696f0585378 is 50, key is test_row_0/B:col10/1732130774455/Put/seqid=0
2024-11-20T19:26:15,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71
2024-11-20T19:26:15,071 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:15,071 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72
2024-11-20T19:26:15,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.
2024-11-20T19:26:15,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing
2024-11-20T19:26:15,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.
2024-11-20T19:26:15,072 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72
java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T19:26:15,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72
java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T19:26:15,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=72
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T19:26:15,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742087_1263 (size=12001)
2024-11-20T19:26:15,119 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:15,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130835117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:15,120 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:15,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130835118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:15,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:15,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130835119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:15,125 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:15,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130835124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:15,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:15,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130835189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:15,224 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:15,224 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72
2024-11-20T19:26:15,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.
2024-11-20T19:26:15,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing
2024-11-20T19:26:15,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.
2024-11-20T19:26:15,225 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72
java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T19:26:15,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72
java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T19:26:15,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=72
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T19:26:15,377 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:15,377 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72
2024-11-20T19:26:15,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.
2024-11-20T19:26:15,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing
2024-11-20T19:26:15,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.
2024-11-20T19:26:15,377 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72
java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T19:26:15,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72
java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T19:26:15,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=72
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T19:26:15,476 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/1df6287842de4424b09fa696f0585378
2024-11-20T19:26:15,512 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/2efa636ae6374fb09125f6c0cee4c043 is 50, key is test_row_0/C:col10/1732130774455/Put/seqid=0
2024-11-20T19:26:15,529 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:15,530 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72
2024-11-20T19:26:15,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.
2024-11-20T19:26:15,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing
2024-11-20T19:26:15,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.
2024-11-20T19:26:15,530 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72
java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T19:26:15,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72
java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T19:26:15,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=72
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T19:26:15,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742088_1264 (size=12001)
2024-11-20T19:26:15,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71
2024-11-20T19:26:15,623 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:15,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130835622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:15,626 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:15,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130835624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:15,627 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:15,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130835626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:15,630 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:15,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130835630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:15,685 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:15,686 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T19:26:15,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:15,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:15,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:15,686 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:15,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:15,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:15,693 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:15,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130835692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:15,839 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:15,839 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T19:26:15,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:15,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
as already flushing 2024-11-20T19:26:15,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:15,839 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:15,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:15,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:15,945 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/2efa636ae6374fb09125f6c0cee4c043 2024-11-20T19:26:15,951 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/c9a7831a5dbf4eba8b6daa6f037f366d as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/c9a7831a5dbf4eba8b6daa6f037f366d 2024-11-20T19:26:15,957 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/c9a7831a5dbf4eba8b6daa6f037f366d, entries=150, sequenceid=12, filesize=11.7 K 2024-11-20T19:26:15,958 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/1df6287842de4424b09fa696f0585378 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/1df6287842de4424b09fa696f0585378 2024-11-20T19:26:15,964 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/1df6287842de4424b09fa696f0585378, entries=150, sequenceid=12, filesize=11.7 K 2024-11-20T19:26:15,965 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/2efa636ae6374fb09125f6c0cee4c043 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/2efa636ae6374fb09125f6c0cee4c043 2024-11-20T19:26:15,974 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/2efa636ae6374fb09125f6c0cee4c043, entries=150, sequenceid=12, filesize=11.7 K 2024-11-20T19:26:15,975 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for eff3b3afe40ac2d93c9d770f2a159636 in 1518ms, sequenceid=12, compaction requested=false 2024-11-20T19:26:15,975 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:15,991 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:15,991 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T19:26:15,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
2024-11-20T19:26:15,992 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:26:15,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A 2024-11-20T19:26:15,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:15,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B 2024-11-20T19:26:15,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:15,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=C 2024-11-20T19:26:15,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:16,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/e4e416d99de944a58026ddbd591abeee is 50, key is test_row_0/A:col10/1732130774505/Put/seqid=0 2024-11-20T19:26:16,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742089_1265 (size=12001) 2024-11-20T19:26:16,035 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/e4e416d99de944a58026ddbd591abeee 2024-11-20T19:26:16,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/c45b69bedd754053a26d5cd4bcec490e is 50, key is test_row_0/B:col10/1732130774505/Put/seqid=0 2024-11-20T19:26:16,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742090_1266 (size=12001) 2024-11-20T19:26:16,078 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), 
to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/c45b69bedd754053a26d5cd4bcec490e 2024-11-20T19:26:16,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/eb39484812db4d3788c67ad4444e3ec9 is 50, key is test_row_0/C:col10/1732130774505/Put/seqid=0 2024-11-20T19:26:16,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742091_1267 (size=12001) 2024-11-20T19:26:16,149 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/eb39484812db4d3788c67ad4444e3ec9 2024-11-20T19:26:16,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/e4e416d99de944a58026ddbd591abeee as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/e4e416d99de944a58026ddbd591abeee 2024-11-20T19:26:16,163 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/e4e416d99de944a58026ddbd591abeee, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T19:26:16,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/c45b69bedd754053a26d5cd4bcec490e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/c45b69bedd754053a26d5cd4bcec490e 2024-11-20T19:26:16,170 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/c45b69bedd754053a26d5cd4bcec490e, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T19:26:16,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/eb39484812db4d3788c67ad4444e3ec9 as 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/eb39484812db4d3788c67ad4444e3ec9 2024-11-20T19:26:16,177 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/eb39484812db4d3788c67ad4444e3ec9, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T19:26:16,178 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for eff3b3afe40ac2d93c9d770f2a159636 in 186ms, sequenceid=37, compaction requested=false 2024-11-20T19:26:16,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:16,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:16,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-11-20T19:26:16,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-11-20T19:26:16,181 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-11-20T19:26:16,181 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7190 sec 2024-11-20T19:26:16,184 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 1.7250 sec 2024-11-20T19:26:16,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T19:26:16,563 INFO [Thread-1206 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-11-20T19:26:16,564 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:16,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-11-20T19:26:16,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T19:26:16,568 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:16,569 INFO [PEWorker-2 {}] 
procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:16,569 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:16,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:16,639 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:26:16,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A 2024-11-20T19:26:16,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:16,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B 2024-11-20T19:26:16,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:16,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=C 2024-11-20T19:26:16,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:16,648 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/bfc28a4541e0402a942aa6c0c7ce109b is 50, key is test_row_0/A:col10/1732130776637/Put/seqid=0 2024-11-20T19:26:16,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T19:26:16,675 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742092_1268 (size=9657) 2024-11-20T19:26:16,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130836669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:16,675 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130836670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:16,676 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130836671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:16,677 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/bfc28a4541e0402a942aa6c0c7ce109b 2024-11-20T19:26:16,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130836675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:16,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130836702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:16,707 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/d57f19803e32443da90ad9c0fe911e7e is 50, key is test_row_0/B:col10/1732130776637/Put/seqid=0 2024-11-20T19:26:16,722 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:16,722 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T19:26:16,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:16,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:16,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:16,722 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:16,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:16,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:16,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742093_1269 (size=9657) 2024-11-20T19:26:16,736 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/d57f19803e32443da90ad9c0fe911e7e 2024-11-20T19:26:16,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/48f404cd23074ccdbf4930978eba2413 is 50, key is test_row_0/C:col10/1732130776637/Put/seqid=0 2024-11-20T19:26:16,780 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130836777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:16,780 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130836777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:16,780 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130836777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:16,785 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130836782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:16,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742094_1270 (size=9657) 2024-11-20T19:26:16,823 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/48f404cd23074ccdbf4930978eba2413 2024-11-20T19:26:16,828 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/bfc28a4541e0402a942aa6c0c7ce109b as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/bfc28a4541e0402a942aa6c0c7ce109b 2024-11-20T19:26:16,833 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/bfc28a4541e0402a942aa6c0c7ce109b, entries=100, sequenceid=48, filesize=9.4 K 2024-11-20T19:26:16,834 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/d57f19803e32443da90ad9c0fe911e7e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/d57f19803e32443da90ad9c0fe911e7e 2024-11-20T19:26:16,839 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/d57f19803e32443da90ad9c0fe911e7e, entries=100, sequenceid=48, filesize=9.4 K 2024-11-20T19:26:16,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/48f404cd23074ccdbf4930978eba2413 as 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/48f404cd23074ccdbf4930978eba2413 2024-11-20T19:26:16,846 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/48f404cd23074ccdbf4930978eba2413, entries=100, sequenceid=48, filesize=9.4 K 2024-11-20T19:26:16,846 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for eff3b3afe40ac2d93c9d770f2a159636 in 207ms, sequenceid=48, compaction requested=true 2024-11-20T19:26:16,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:16,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:16,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:16,847 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:16,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:16,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:16,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:16,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T19:26:16,847 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:16,848 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:16,849 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/A is initiating minor compaction (all files) 2024-11-20T19:26:16,849 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/A in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
2024-11-20T19:26:16,849 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/c9a7831a5dbf4eba8b6daa6f037f366d, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/e4e416d99de944a58026ddbd591abeee, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/bfc28a4541e0402a942aa6c0c7ce109b] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=32.9 K 2024-11-20T19:26:16,849 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:16,849 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/B is initiating minor compaction (all files) 2024-11-20T19:26:16,849 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/B in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:16,849 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/1df6287842de4424b09fa696f0585378, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/c45b69bedd754053a26d5cd4bcec490e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/d57f19803e32443da90ad9c0fe911e7e] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=32.9 K 2024-11-20T19:26:16,850 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1df6287842de4424b09fa696f0585378, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732130774451 2024-11-20T19:26:16,851 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting c9a7831a5dbf4eba8b6daa6f037f366d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732130774451 2024-11-20T19:26:16,851 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting c45b69bedd754053a26d5cd4bcec490e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732130774499 2024-11-20T19:26:16,852 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting e4e416d99de944a58026ddbd591abeee, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732130774499 2024-11-20T19:26:16,852 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting d57f19803e32443da90ad9c0fe911e7e, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1732130776634 2024-11-20T19:26:16,852 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting bfc28a4541e0402a942aa6c0c7ce109b, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1732130776634 2024-11-20T19:26:16,868 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#B#compaction#216 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:16,869 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/8d85a89313aa4c5fa5833ff59cb8d59c is 50, key is test_row_0/B:col10/1732130776637/Put/seqid=0 2024-11-20T19:26:16,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T19:26:16,874 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:16,877 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#A#compaction#217 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:16,877 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/10c4186ed0ba43caadd8b49975eebe4c is 50, key is test_row_0/A:col10/1732130776637/Put/seqid=0 2024-11-20T19:26:16,879 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T19:26:16,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
2024-11-20T19:26:16,880 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:26:16,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A 2024-11-20T19:26:16,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:16,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B 2024-11-20T19:26:16,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:16,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=C 2024-11-20T19:26:16,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:16,901 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T19:26:16,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/1df33656f25e44968a7b46e3bda51779 is 50, key is test_row_0/A:col10/1732130776668/Put/seqid=0 2024-11-20T19:26:17,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:17,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742095_1271 (size=12104) 2024-11-20T19:26:17,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:17,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742096_1272 (size=12104) 2024-11-20T19:26:17,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130837086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:17,094 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/10c4186ed0ba43caadd8b49975eebe4c as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/10c4186ed0ba43caadd8b49975eebe4c 2024-11-20T19:26:17,097 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130837089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:17,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130837088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:17,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742097_1273 (size=12001) 2024-11-20T19:26:17,101 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/A of eff3b3afe40ac2d93c9d770f2a159636 into 10c4186ed0ba43caadd8b49975eebe4c(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:17,101 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:17,101 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/A, priority=13, startTime=1732130776847; duration=0sec 2024-11-20T19:26:17,101 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:17,101 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:A 2024-11-20T19:26:17,102 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:17,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130837093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:17,103 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:17,103 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/C is initiating minor compaction (all files) 2024-11-20T19:26:17,103 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/C in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:17,103 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/2efa636ae6374fb09125f6c0cee4c043, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/eb39484812db4d3788c67ad4444e3ec9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/48f404cd23074ccdbf4930978eba2413] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=32.9 K 2024-11-20T19:26:17,104 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 2efa636ae6374fb09125f6c0cee4c043, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732130774451 2024-11-20T19:26:17,104 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting eb39484812db4d3788c67ad4444e3ec9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732130774499 2024-11-20T19:26:17,104 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 48f404cd23074ccdbf4930978eba2413, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1732130776634 2024-11-20T19:26:17,132 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#C#compaction#219 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:17,133 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/3df0d0fcf1ec4b149265bc06941420c5 is 50, key is test_row_0/C:col10/1732130776637/Put/seqid=0 2024-11-20T19:26:17,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742098_1274 (size=12104) 2024-11-20T19:26:17,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T19:26:17,186 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/3df0d0fcf1ec4b149265bc06941420c5 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/3df0d0fcf1ec4b149265bc06941420c5 2024-11-20T19:26:17,198 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/C of eff3b3afe40ac2d93c9d770f2a159636 into 3df0d0fcf1ec4b149265bc06941420c5(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:17,198 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:17,198 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/C, priority=13, startTime=1732130776847; duration=0sec 2024-11-20T19:26:17,199 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:17,199 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:C 2024-11-20T19:26:17,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130837198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:17,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130837203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:17,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130837205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:17,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130837199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:17,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130837406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:17,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130837410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:17,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130837410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:17,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130837411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:17,489 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/8d85a89313aa4c5fa5833ff59cb8d59c as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/8d85a89313aa4c5fa5833ff59cb8d59c 2024-11-20T19:26:17,499 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/B of eff3b3afe40ac2d93c9d770f2a159636 into 8d85a89313aa4c5fa5833ff59cb8d59c(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:17,499 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:17,499 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/B, priority=13, startTime=1732130776847; duration=0sec 2024-11-20T19:26:17,500 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:17,500 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:B 2024-11-20T19:26:17,502 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/1df33656f25e44968a7b46e3bda51779 2024-11-20T19:26:17,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/fcc4143c6ff34710ab09ff7f9d8dfeff is 50, key is test_row_0/B:col10/1732130776668/Put/seqid=0 2024-11-20T19:26:17,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742099_1275 (size=12001) 2024-11-20T19:26:17,532 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/fcc4143c6ff34710ab09ff7f9d8dfeff 2024-11-20T19:26:17,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/06109dcd480c47dcbfb90aafef197082 is 50, key is test_row_0/C:col10/1732130776668/Put/seqid=0 2024-11-20T19:26:17,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742100_1276 (size=12001) 2024-11-20T19:26:17,585 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/06109dcd480c47dcbfb90aafef197082 2024-11-20T19:26:17,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/1df33656f25e44968a7b46e3bda51779 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/1df33656f25e44968a7b46e3bda51779 2024-11-20T19:26:17,601 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/1df33656f25e44968a7b46e3bda51779, entries=150, sequenceid=73, filesize=11.7 K 2024-11-20T19:26:17,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/fcc4143c6ff34710ab09ff7f9d8dfeff as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/fcc4143c6ff34710ab09ff7f9d8dfeff 2024-11-20T19:26:17,611 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/fcc4143c6ff34710ab09ff7f9d8dfeff, entries=150, sequenceid=73, filesize=11.7 K 2024-11-20T19:26:17,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/06109dcd480c47dcbfb90aafef197082 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/06109dcd480c47dcbfb90aafef197082 2024-11-20T19:26:17,632 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/06109dcd480c47dcbfb90aafef197082, entries=150, sequenceid=73, filesize=11.7 K 2024-11-20T19:26:17,633 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for eff3b3afe40ac2d93c9d770f2a159636 in 753ms, sequenceid=73, compaction requested=false 2024-11-20T19:26:17,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:17,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
2024-11-20T19:26:17,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-11-20T19:26:17,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-11-20T19:26:17,636 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-11-20T19:26:17,636 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0650 sec 2024-11-20T19:26:17,638 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 1.0730 sec 2024-11-20T19:26:17,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T19:26:17,673 INFO [Thread-1206 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-11-20T19:26:17,675 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:17,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-11-20T19:26:17,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T19:26:17,676 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:17,677 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:17,677 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:17,711 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T19:26:17,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A 2024-11-20T19:26:17,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:17,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B 2024-11-20T19:26:17,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:17,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=C 2024-11-20T19:26:17,712 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:17,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:17,716 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/a81962eaa9ec493b8c92cae70406201e is 50, key is test_row_0/A:col10/1732130777710/Put/seqid=0 2024-11-20T19:26:17,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742101_1277 (size=12001) 2024-11-20T19:26:17,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130837771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:17,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130837773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:17,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130837772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:17,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130837774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:17,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T19:26:17,828 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:17,836 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T19:26:17,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:17,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:17,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:17,836 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:17,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:17,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:17,879 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130837876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:17,880 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130837877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:17,880 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130837877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:17,889 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130837886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:17,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T19:26:17,989 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:17,989 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T19:26:17,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:17,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:17,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:17,989 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:17,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:17,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:18,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:18,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130838081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:18,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:18,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130838082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:18,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:18,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130838082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:18,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:18,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130838090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:18,137 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/a81962eaa9ec493b8c92cae70406201e 2024-11-20T19:26:18,142 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:18,142 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T19:26:18,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:18,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:18,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:18,142 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:18,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:18,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:18,152 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/35200128acc5479d888a78e90595bc81 is 50, key is test_row_0/B:col10/1732130777710/Put/seqid=0 2024-11-20T19:26:18,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742102_1278 (size=12001) 2024-11-20T19:26:18,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T19:26:18,296 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:18,296 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T19:26:18,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:18,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:18,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
2024-11-20T19:26:18,296 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:18,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:18,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:18,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:18,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130838385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:18,389 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:18,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130838386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:18,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:18,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130838387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:18,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:18,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130838394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:18,448 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:18,449 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T19:26:18,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:18,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:18,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:18,449 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:18,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:18,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:18,596 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/35200128acc5479d888a78e90595bc81 2024-11-20T19:26:18,601 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:18,605 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/831e02577f0c4c339c0cea91928c2e28 is 50, key is test_row_0/C:col10/1732130777710/Put/seqid=0 2024-11-20T19:26:18,607 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T19:26:18,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:18,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:18,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:18,607 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:18,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:18,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:18,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742103_1279 (size=12001) 2024-11-20T19:26:18,638 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/831e02577f0c4c339c0cea91928c2e28 2024-11-20T19:26:18,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/a81962eaa9ec493b8c92cae70406201e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/a81962eaa9ec493b8c92cae70406201e 2024-11-20T19:26:18,653 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/a81962eaa9ec493b8c92cae70406201e, entries=150, sequenceid=88, filesize=11.7 K 2024-11-20T19:26:18,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/35200128acc5479d888a78e90595bc81 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/35200128acc5479d888a78e90595bc81 2024-11-20T19:26:18,664 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/35200128acc5479d888a78e90595bc81, entries=150, sequenceid=88, filesize=11.7 K 2024-11-20T19:26:18,666 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/831e02577f0c4c339c0cea91928c2e28 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/831e02577f0c4c339c0cea91928c2e28 2024-11-20T19:26:18,675 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/831e02577f0c4c339c0cea91928c2e28, entries=150, sequenceid=88, filesize=11.7 K 2024-11-20T19:26:18,676 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for eff3b3afe40ac2d93c9d770f2a159636 in 965ms, sequenceid=88, compaction requested=true 2024-11-20T19:26:18,676 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:18,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:18,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:18,676 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:18,676 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:18,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:18,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:18,677 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:18,677 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/B is initiating minor compaction (all files) 2024-11-20T19:26:18,677 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/B in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
2024-11-20T19:26:18,678 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/8d85a89313aa4c5fa5833ff59cb8d59c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/fcc4143c6ff34710ab09ff7f9d8dfeff, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/35200128acc5479d888a78e90595bc81] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=35.3 K 2024-11-20T19:26:18,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:18,678 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:18,678 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/A is initiating minor compaction (all files) 2024-11-20T19:26:18,678 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/A in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:18,678 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 8d85a89313aa4c5fa5833ff59cb8d59c, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1732130774499 2024-11-20T19:26:18,678 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/10c4186ed0ba43caadd8b49975eebe4c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/1df33656f25e44968a7b46e3bda51779, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/a81962eaa9ec493b8c92cae70406201e] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=35.3 K 2024-11-20T19:26:18,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:18,678 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting fcc4143c6ff34710ab09ff7f9d8dfeff, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1732130776668 2024-11-20T19:26:18,678 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 10c4186ed0ba43caadd8b49975eebe4c, keycount=150, bloomtype=ROW, size=11.8 K, 
encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1732130774499 2024-11-20T19:26:18,678 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 35200128acc5479d888a78e90595bc81, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1732130777085 2024-11-20T19:26:18,679 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1df33656f25e44968a7b46e3bda51779, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1732130776668 2024-11-20T19:26:18,679 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting a81962eaa9ec493b8c92cae70406201e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1732130777085 2024-11-20T19:26:18,698 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#B#compaction#225 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:18,699 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/9062e57557ee455da326d6306667bcb1 is 50, key is test_row_0/B:col10/1732130777710/Put/seqid=0 2024-11-20T19:26:18,715 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#A#compaction#226 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:18,715 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/caf503dfc01244ee9cc1fd0c9c6a3645 is 50, key is test_row_0/A:col10/1732130777710/Put/seqid=0 2024-11-20T19:26:18,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:18,724 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:26:18,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A 2024-11-20T19:26:18,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:18,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B 2024-11-20T19:26:18,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:18,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=C 2024-11-20T19:26:18,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:18,747 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/d8de5e3986bc429290b82bbc3e447161 is 50, key is test_row_0/A:col10/1732130777771/Put/seqid=0 2024-11-20T19:26:18,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742104_1280 (size=12207) 2024-11-20T19:26:18,759 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:18,760 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T19:26:18,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:18,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:18,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
2024-11-20T19:26:18,760 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:18,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:18,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:18,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742105_1281 (size=12207) 2024-11-20T19:26:18,772 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:18,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130838766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:18,778 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/caf503dfc01244ee9cc1fd0c9c6a3645 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/caf503dfc01244ee9cc1fd0c9c6a3645 2024-11-20T19:26:18,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T19:26:18,787 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/A of eff3b3afe40ac2d93c9d770f2a159636 into caf503dfc01244ee9cc1fd0c9c6a3645(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:18,787 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:18,787 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/A, priority=13, startTime=1732130778676; duration=0sec 2024-11-20T19:26:18,787 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:18,787 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:A 2024-11-20T19:26:18,787 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:18,789 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:18,789 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/C is initiating minor compaction (all files) 2024-11-20T19:26:18,789 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/C in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:18,789 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/3df0d0fcf1ec4b149265bc06941420c5, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/06109dcd480c47dcbfb90aafef197082, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/831e02577f0c4c339c0cea91928c2e28] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=35.3 K 2024-11-20T19:26:18,790 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3df0d0fcf1ec4b149265bc06941420c5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1732130774499 2024-11-20T19:26:18,790 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06109dcd480c47dcbfb90aafef197082, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1732130776668 2024-11-20T19:26:18,791 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 831e02577f0c4c339c0cea91928c2e28, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1732130777085 2024-11-20T19:26:18,801 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#C#compaction#228 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:18,802 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/ac3b18977e6a4f8d83845725f2a3dfc4 is 50, key is test_row_0/C:col10/1732130777710/Put/seqid=0 2024-11-20T19:26:18,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742106_1282 (size=14341) 2024-11-20T19:26:18,806 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/d8de5e3986bc429290b82bbc3e447161 2024-11-20T19:26:18,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742107_1283 (size=12207) 2024-11-20T19:26:18,822 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/ac3b18977e6a4f8d83845725f2a3dfc4 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/ac3b18977e6a4f8d83845725f2a3dfc4 2024-11-20T19:26:18,832 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/C of eff3b3afe40ac2d93c9d770f2a159636 into ac3b18977e6a4f8d83845725f2a3dfc4(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:18,832 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:18,832 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/C, priority=13, startTime=1732130778677; duration=0sec 2024-11-20T19:26:18,832 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:18,832 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:C 2024-11-20T19:26:18,835 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/9645a22b840e4e52bf33a612e39f2594 is 50, key is test_row_0/B:col10/1732130777771/Put/seqid=0 2024-11-20T19:26:18,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742108_1284 (size=12001) 2024-11-20T19:26:18,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:18,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130838875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:18,895 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:18,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130838892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:18,895 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:18,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130838894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:18,895 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:18,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130838894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:18,899 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:18,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130838897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:18,912 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:18,913 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T19:26:18,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:18,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:18,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:18,913 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:18,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:18,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:19,065 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:19,066 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T19:26:19,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:19,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:19,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:19,066 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:19,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:19,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:19,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:19,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130839080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:19,156 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/9062e57557ee455da326d6306667bcb1 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/9062e57557ee455da326d6306667bcb1 2024-11-20T19:26:19,167 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/B of eff3b3afe40ac2d93c9d770f2a159636 into 9062e57557ee455da326d6306667bcb1(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
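The repeated RegionTooBusyException rejections above ("Over memstore limit=512.0 K") are the region server's back-pressure path: HRegion.checkResources refuses new mutations once the region's memstore passes its blocking size, i.e. hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier (512 K is far below the 128 MB default flush size, so the test is evidently running with a deliberately tiny memstore). A writer is expected to back off and retry. The following is a minimal client-side sketch against the standard HBase client API; the backoff numbers are illustrative (row, family and qualifier are taken from the log), and a real client's own retry layer may surface the condition wrapped in RetriesExhaustedWithDetailsException rather than as the raw exception caught here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                       // illustrative starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                         // rejected while the memstore is over its blocking limit
          break;                                  // write accepted
        } catch (RegionTooBusyException busy) {
          // Server-side back-pressure: give flushes/compactions time to catch up, then retry.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}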
2024-11-20T19:26:19,167 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:19,167 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/B, priority=13, startTime=1732130778676; duration=0sec 2024-11-20T19:26:19,167 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:19,167 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:B 2024-11-20T19:26:19,218 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:19,219 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T19:26:19,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:19,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:19,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:19,219 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
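The pid=76 cycle above is the master-driven flush: the master keeps re-dispatching FlushRegionCallable to the region server, the region answers "NOT flushing ... as already flushing", and the callable fails with the IOException recorded here until the in-flight flush finishes, at which point the sub-procedure succeeds (as it eventually does later in this log). From a client, the same table flush would be requested through the Admin API; a minimal sketch, assuming the request is routed through the master's flush procedures as the pid=75/pid=76 records in this log suggest:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // In this build a table flush appears on the master as a FlushTableProcedure (pid=75)
      // with one FlushRegionProcedure per region (pid=76); a region that is already flushing
      // simply causes the sub-procedure to be retried until that flush completes.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}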
2024-11-20T19:26:19,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:19,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:19,247 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/9645a22b840e4e52bf33a612e39f2594 2024-11-20T19:26:19,270 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/616043a3c58a47a99e18cc6fd12e3da8 is 50, key is test_row_0/C:col10/1732130777771/Put/seqid=0 2024-11-20T19:26:19,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742109_1285 (size=12001) 2024-11-20T19:26:19,290 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/616043a3c58a47a99e18cc6fd12e3da8 2024-11-20T19:26:19,299 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/d8de5e3986bc429290b82bbc3e447161 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/d8de5e3986bc429290b82bbc3e447161 2024-11-20T19:26:19,310 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/d8de5e3986bc429290b82bbc3e447161, entries=200, sequenceid=113, filesize=14.0 K 2024-11-20T19:26:19,311 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/9645a22b840e4e52bf33a612e39f2594 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/9645a22b840e4e52bf33a612e39f2594 2024-11-20T19:26:19,321 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/9645a22b840e4e52bf33a612e39f2594, entries=150, sequenceid=113, filesize=11.7 K 2024-11-20T19:26:19,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/616043a3c58a47a99e18cc6fd12e3da8 as 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/616043a3c58a47a99e18cc6fd12e3da8 2024-11-20T19:26:19,334 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/616043a3c58a47a99e18cc6fd12e3da8, entries=150, sequenceid=113, filesize=11.7 K 2024-11-20T19:26:19,335 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for eff3b3afe40ac2d93c9d770f2a159636 in 611ms, sequenceid=113, compaction requested=false 2024-11-20T19:26:19,335 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:19,371 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:19,372 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T19:26:19,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:19,373 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:26:19,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A 2024-11-20T19:26:19,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:19,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B 2024-11-20T19:26:19,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:19,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=C 2024-11-20T19:26:19,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:19,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/5f19800f06304587b251e5885d04d75e is 50, key is test_row_0/A:col10/1732130778763/Put/seqid=0 2024-11-20T19:26:19,386 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:19,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:19,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742110_1286 (size=12001) 2024-11-20T19:26:19,413 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/5f19800f06304587b251e5885d04d75e 2024-11-20T19:26:19,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/d08f51c1cf2d41ebaacc6fccabc64a19 is 50, key is test_row_0/B:col10/1732130778763/Put/seqid=0 2024-11-20T19:26:19,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742111_1287 (size=12001) 2024-11-20T19:26:19,472 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/d08f51c1cf2d41ebaacc6fccabc64a19 2024-11-20T19:26:19,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/7debdfbdb7f34ac687e38e4add5718d1 is 50, key is test_row_0/C:col10/1732130778763/Put/seqid=0 2024-11-20T19:26:19,502 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:19,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130839498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:19,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742112_1288 (size=12001) 2024-11-20T19:26:19,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:19,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130839604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:19,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T19:26:19,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:19,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130839808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:19,902 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:19,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130839899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:19,902 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:19,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130839900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:19,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:19,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130839904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:19,907 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:19,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130839905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:19,946 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/7debdfbdb7f34ac687e38e4add5718d1 2024-11-20T19:26:19,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/5f19800f06304587b251e5885d04d75e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/5f19800f06304587b251e5885d04d75e 2024-11-20T19:26:19,962 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/5f19800f06304587b251e5885d04d75e, entries=150, sequenceid=127, filesize=11.7 K 2024-11-20T19:26:19,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/d08f51c1cf2d41ebaacc6fccabc64a19 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/d08f51c1cf2d41ebaacc6fccabc64a19 2024-11-20T19:26:19,974 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/d08f51c1cf2d41ebaacc6fccabc64a19, entries=150, sequenceid=127, filesize=11.7 K 2024-11-20T19:26:19,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/7debdfbdb7f34ac687e38e4add5718d1 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/7debdfbdb7f34ac687e38e4add5718d1 2024-11-20T19:26:19,983 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/7debdfbdb7f34ac687e38e4add5718d1, entries=150, sequenceid=127, filesize=11.7 K 2024-11-20T19:26:19,984 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for eff3b3afe40ac2d93c9d770f2a159636 in 611ms, sequenceid=127, compaction requested=true 2024-11-20T19:26:19,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:19,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:19,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-11-20T19:26:19,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-11-20T19:26:19,991 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-11-20T19:26:19,992 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3120 sec 2024-11-20T19:26:20,023 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 2.3170 sec 2024-11-20T19:26:20,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:20,117 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T19:26:20,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A 2024-11-20T19:26:20,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:20,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B 2024-11-20T19:26:20,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:20,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
eff3b3afe40ac2d93c9d770f2a159636, store=C 2024-11-20T19:26:20,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:20,124 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/0fc2bc4260ad4e4b99a4037c59c58aa6 is 50, key is test_row_0/A:col10/1732130779465/Put/seqid=0 2024-11-20T19:26:20,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742113_1289 (size=14541) 2024-11-20T19:26:20,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130840151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:20,259 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130840256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:20,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130840461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:20,534 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/0fc2bc4260ad4e4b99a4037c59c58aa6 2024-11-20T19:26:20,543 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/104771bdc12844e388f6de6452e85ae0 is 50, key is test_row_0/B:col10/1732130779465/Put/seqid=0 2024-11-20T19:26:20,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742114_1290 (size=12151) 2024-11-20T19:26:20,585 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/104771bdc12844e388f6de6452e85ae0 2024-11-20T19:26:20,594 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/348c11a811b745579d0ff199fb51a8e9 is 50, key is test_row_0/C:col10/1732130779465/Put/seqid=0 2024-11-20T19:26:20,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742115_1291 (size=12151) 2024-11-20T19:26:20,770 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130840767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:21,029 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/348c11a811b745579d0ff199fb51a8e9 2024-11-20T19:26:21,034 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/0fc2bc4260ad4e4b99a4037c59c58aa6 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/0fc2bc4260ad4e4b99a4037c59c58aa6 2024-11-20T19:26:21,039 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/0fc2bc4260ad4e4b99a4037c59c58aa6, entries=200, sequenceid=153, filesize=14.2 K 2024-11-20T19:26:21,040 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/104771bdc12844e388f6de6452e85ae0 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/104771bdc12844e388f6de6452e85ae0 2024-11-20T19:26:21,045 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/104771bdc12844e388f6de6452e85ae0, entries=150, sequenceid=153, filesize=11.9 K 2024-11-20T19:26:21,046 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/348c11a811b745579d0ff199fb51a8e9 as 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/348c11a811b745579d0ff199fb51a8e9 2024-11-20T19:26:21,052 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/348c11a811b745579d0ff199fb51a8e9, entries=150, sequenceid=153, filesize=11.9 K 2024-11-20T19:26:21,053 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for eff3b3afe40ac2d93c9d770f2a159636 in 936ms, sequenceid=153, compaction requested=true 2024-11-20T19:26:21,053 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:21,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:21,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:21,054 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:21,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:21,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:21,054 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:21,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:21,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:21,056 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48360 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:21,056 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/B is initiating minor compaction (all files) 2024-11-20T19:26:21,056 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/B in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
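The compaction records around this point show the system's own selection at work: stores A and B have each accumulated four HFiles, ExploringCompactionPolicy picks all four in each store, and the short- and long-compaction threads merge them under the PressureAwareThroughputController. For completeness, an operator-requested compaction of the same table would go through the Admin API; a minimal sketch (the server-side policy still decides which files actually get merged):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestAcidGuarantees");
      admin.compact(tn);       // minor compaction request; the store's policy selects the files
      admin.majorCompact(tn);  // or rewrite every file in each store into a single file
    }
  }
}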
2024-11-20T19:26:21,056 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/9062e57557ee455da326d6306667bcb1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/9645a22b840e4e52bf33a612e39f2594, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/d08f51c1cf2d41ebaacc6fccabc64a19, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/104771bdc12844e388f6de6452e85ae0] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=47.2 K 2024-11-20T19:26:21,056 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53090 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:21,056 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/A is initiating minor compaction (all files) 2024-11-20T19:26:21,056 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/A in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:21,056 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/caf503dfc01244ee9cc1fd0c9c6a3645, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/d8de5e3986bc429290b82bbc3e447161, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/5f19800f06304587b251e5885d04d75e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/0fc2bc4260ad4e4b99a4037c59c58aa6] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=51.8 K 2024-11-20T19:26:21,057 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 9062e57557ee455da326d6306667bcb1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1732130777085 2024-11-20T19:26:21,057 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting caf503dfc01244ee9cc1fd0c9c6a3645, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1732130777085 2024-11-20T19:26:21,057 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 9645a22b840e4e52bf33a612e39f2594, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, 
earliestPutTs=1732130777771 2024-11-20T19:26:21,057 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8de5e3986bc429290b82bbc3e447161, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1732130777766 2024-11-20T19:26:21,057 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting d08f51c1cf2d41ebaacc6fccabc64a19, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732130778743 2024-11-20T19:26:21,058 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5f19800f06304587b251e5885d04d75e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732130778743 2024-11-20T19:26:21,058 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0fc2bc4260ad4e4b99a4037c59c58aa6, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1732130779465 2024-11-20T19:26:21,058 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 104771bdc12844e388f6de6452e85ae0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1732130779465 2024-11-20T19:26:21,087 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#A#compaction#237 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:21,087 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/29019d27034240768e022597ec82b687 is 50, key is test_row_0/A:col10/1732130779465/Put/seqid=0 2024-11-20T19:26:21,093 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#B#compaction#238 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:21,094 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/3ec827e7b5d34c66985754995de9744c is 50, key is test_row_0/B:col10/1732130779465/Put/seqid=0 2024-11-20T19:26:21,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742116_1292 (size=12493) 2024-11-20T19:26:21,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742117_1293 (size=12493) 2024-11-20T19:26:21,150 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/3ec827e7b5d34c66985754995de9744c as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/3ec827e7b5d34c66985754995de9744c 2024-11-20T19:26:21,160 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/B of eff3b3afe40ac2d93c9d770f2a159636 into 3ec827e7b5d34c66985754995de9744c(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:21,160 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:21,160 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/B, priority=12, startTime=1732130781054; duration=0sec 2024-11-20T19:26:21,161 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:21,161 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:B 2024-11-20T19:26:21,161 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:21,163 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48360 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:21,163 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/C is initiating minor compaction (all files) 2024-11-20T19:26:21,163 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/C in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
2024-11-20T19:26:21,163 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/ac3b18977e6a4f8d83845725f2a3dfc4, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/616043a3c58a47a99e18cc6fd12e3da8, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/7debdfbdb7f34ac687e38e4add5718d1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/348c11a811b745579d0ff199fb51a8e9] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=47.2 K 2024-11-20T19:26:21,165 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting ac3b18977e6a4f8d83845725f2a3dfc4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1732130777085 2024-11-20T19:26:21,165 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 616043a3c58a47a99e18cc6fd12e3da8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1732130777771 2024-11-20T19:26:21,165 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 7debdfbdb7f34ac687e38e4add5718d1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732130778743 2024-11-20T19:26:21,166 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 348c11a811b745579d0ff199fb51a8e9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1732130779465 2024-11-20T19:26:21,189 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#C#compaction#239 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:21,190 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/dfb36e475de941a095b1c4232981491d is 50, key is test_row_0/C:col10/1732130779465/Put/seqid=0 2024-11-20T19:26:21,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742118_1294 (size=12493) 2024-11-20T19:26:21,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:21,295 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:26:21,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A 2024-11-20T19:26:21,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:21,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B 2024-11-20T19:26:21,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:21,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=C 2024-11-20T19:26:21,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:21,311 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/111f8a5838da47cd9dfc4deade89b18f is 50, key is test_row_0/A:col10/1732130781290/Put/seqid=0 2024-11-20T19:26:21,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742119_1295 (size=14541) 2024-11-20T19:26:21,332 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/111f8a5838da47cd9dfc4deade89b18f 2024-11-20T19:26:21,344 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/22a88c5577cf40a8a96c503faf72b058 is 50, key is test_row_0/B:col10/1732130781290/Put/seqid=0 2024-11-20T19:26:21,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742120_1296 (size=12151) 2024-11-20T19:26:21,436 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:21,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130841432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:21,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:21,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130841538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:21,541 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/29019d27034240768e022597ec82b687 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/29019d27034240768e022597ec82b687 2024-11-20T19:26:21,546 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/A of eff3b3afe40ac2d93c9d770f2a159636 into 29019d27034240768e022597ec82b687(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:21,546 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:21,546 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/A, priority=12, startTime=1732130781053; duration=0sec 2024-11-20T19:26:21,546 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:21,546 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:A 2024-11-20T19:26:21,642 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/dfb36e475de941a095b1c4232981491d as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/dfb36e475de941a095b1c4232981491d 2024-11-20T19:26:21,647 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/C of eff3b3afe40ac2d93c9d770f2a159636 into dfb36e475de941a095b1c4232981491d(size=12.2 K), total size for store is 12.2 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:21,647 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:21,647 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/C, priority=12, startTime=1732130781054; duration=0sec 2024-11-20T19:26:21,647 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:21,647 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:C 2024-11-20T19:26:21,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:21,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130841741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:21,765 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/22a88c5577cf40a8a96c503faf72b058 2024-11-20T19:26:21,777 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/3e3e32b7c0d24f568863926738fbf811 is 50, key is test_row_0/C:col10/1732130781290/Put/seqid=0 2024-11-20T19:26:21,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T19:26:21,781 INFO [Thread-1206 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-11-20T19:26:21,782 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:21,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-11-20T19:26:21,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T19:26:21,784 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:21,784 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:21,784 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:21,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742121_1297 (size=12151) 
2024-11-20T19:26:21,814 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/3e3e32b7c0d24f568863926738fbf811 2024-11-20T19:26:21,819 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/111f8a5838da47cd9dfc4deade89b18f as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/111f8a5838da47cd9dfc4deade89b18f 2024-11-20T19:26:21,833 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/111f8a5838da47cd9dfc4deade89b18f, entries=200, sequenceid=165, filesize=14.2 K 2024-11-20T19:26:21,834 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/22a88c5577cf40a8a96c503faf72b058 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/22a88c5577cf40a8a96c503faf72b058 2024-11-20T19:26:21,840 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/22a88c5577cf40a8a96c503faf72b058, entries=150, sequenceid=165, filesize=11.9 K 2024-11-20T19:26:21,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/3e3e32b7c0d24f568863926738fbf811 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/3e3e32b7c0d24f568863926738fbf811 2024-11-20T19:26:21,847 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/3e3e32b7c0d24f568863926738fbf811, entries=150, sequenceid=165, filesize=11.9 K 2024-11-20T19:26:21,848 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for eff3b3afe40ac2d93c9d770f2a159636 in 553ms, sequenceid=165, compaction requested=false 2024-11-20T19:26:21,848 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:21,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T19:26:21,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:21,909 INFO 
[MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T19:26:21,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A 2024-11-20T19:26:21,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:21,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B 2024-11-20T19:26:21,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:21,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=C 2024-11-20T19:26:21,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:21,922 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:21,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130841919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:21,925 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:21,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130841920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:21,925 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:21,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130841921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:21,925 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:21,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130841922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:21,927 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/d9ccfceb2fe54be38aeefeab0db2db37 is 50, key is test_row_0/A:col10/1732130781908/Put/seqid=0 2024-11-20T19:26:21,936 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:21,936 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T19:26:21,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:21,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:21,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
2024-11-20T19:26:21,937 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:21,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:21,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:21,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742122_1298 (size=12151) 2024-11-20T19:26:21,969 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/d9ccfceb2fe54be38aeefeab0db2db37 2024-11-20T19:26:21,985 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/0e7070d678e04397a2aec6ef4c06ef97 is 50, key is test_row_0/B:col10/1732130781908/Put/seqid=0 2024-11-20T19:26:22,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742123_1299 (size=12151) 2024-11-20T19:26:22,028 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130842026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,028 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130842027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,030 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130842029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130842029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130842044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T19:26:22,108 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,108 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T19:26:22,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:22,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:22,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:22,109 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:22,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:22,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:22,233 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130842229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,233 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130842229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130842232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130842232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,261 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,261 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T19:26:22,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:22,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:22,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:22,262 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:22,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:22,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:22,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T19:26:22,413 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,413 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T19:26:22,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:22,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:22,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:22,414 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:22,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:22,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:22,416 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/0e7070d678e04397a2aec6ef4c06ef97 2024-11-20T19:26:22,440 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/4d1a17f064d6450495af135342cc4b4b is 50, key is test_row_0/C:col10/1732130781908/Put/seqid=0 2024-11-20T19:26:22,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742124_1300 (size=12151) 2024-11-20T19:26:22,473 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/4d1a17f064d6450495af135342cc4b4b 2024-11-20T19:26:22,480 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/d9ccfceb2fe54be38aeefeab0db2db37 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/d9ccfceb2fe54be38aeefeab0db2db37 2024-11-20T19:26:22,487 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/d9ccfceb2fe54be38aeefeab0db2db37, entries=150, sequenceid=193, filesize=11.9 K 2024-11-20T19:26:22,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/0e7070d678e04397a2aec6ef4c06ef97 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/0e7070d678e04397a2aec6ef4c06ef97 2024-11-20T19:26:22,494 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/0e7070d678e04397a2aec6ef4c06ef97, entries=150, sequenceid=193, filesize=11.9 K 2024-11-20T19:26:22,496 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/4d1a17f064d6450495af135342cc4b4b as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/4d1a17f064d6450495af135342cc4b4b 2024-11-20T19:26:22,502 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/4d1a17f064d6450495af135342cc4b4b, entries=150, sequenceid=193, filesize=11.9 K 2024-11-20T19:26:22,503 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for eff3b3afe40ac2d93c9d770f2a159636 in 593ms, sequenceid=193, compaction requested=true 2024-11-20T19:26:22,503 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:22,503 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:22,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:22,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:22,504 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:22,505 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39185 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:22,505 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/A is initiating minor compaction (all files) 2024-11-20T19:26:22,505 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/A in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
2024-11-20T19:26:22,505 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/29019d27034240768e022597ec82b687, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/111f8a5838da47cd9dfc4deade89b18f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/d9ccfceb2fe54be38aeefeab0db2db37] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=38.3 K 2024-11-20T19:26:22,506 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:22,506 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/B is initiating minor compaction (all files) 2024-11-20T19:26:22,506 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/B in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:22,506 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/3ec827e7b5d34c66985754995de9744c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/22a88c5577cf40a8a96c503faf72b058, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/0e7070d678e04397a2aec6ef4c06ef97] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=35.9 K 2024-11-20T19:26:22,506 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29019d27034240768e022597ec82b687, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1732130779465 2024-11-20T19:26:22,506 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ec827e7b5d34c66985754995de9744c, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1732130779465 2024-11-20T19:26:22,506 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 111f8a5838da47cd9dfc4deade89b18f, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1732130780125 2024-11-20T19:26:22,507 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 22a88c5577cf40a8a96c503faf72b058, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1732130780125 2024-11-20T19:26:22,507 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting d9ccfceb2fe54be38aeefeab0db2db37, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732130781420 2024-11-20T19:26:22,508 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e7070d678e04397a2aec6ef4c06ef97, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732130781420 2024-11-20T19:26:22,511 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:22,511 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:22,511 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:22,511 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:22,538 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#B#compaction#246 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:22,538 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/6ebaa8ddcdcb49f7b8b2b877165a5485 is 50, key is test_row_0/B:col10/1732130781908/Put/seqid=0 2024-11-20T19:26:22,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:22,540 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:26:22,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A 2024-11-20T19:26:22,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:22,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B 2024-11-20T19:26:22,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:22,541 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#A#compaction#247 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:22,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=C 2024-11-20T19:26:22,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:22,541 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/52c173376ed44c41a1184d8526f7dc15 is 50, key is test_row_0/A:col10/1732130781908/Put/seqid=0 2024-11-20T19:26:22,556 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/7685aee2a1344c5f94dd30fc6315a9f9 is 50, key is test_row_0/A:col10/1732130781917/Put/seqid=0 2024-11-20T19:26:22,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742126_1302 (size=12595) 2024-11-20T19:26:22,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742125_1301 (size=12595) 2024-11-20T19:26:22,566 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,566 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T19:26:22,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:22,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:22,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:22,566 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:22,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:22,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:22,573 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/52c173376ed44c41a1184d8526f7dc15 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/52c173376ed44c41a1184d8526f7dc15 2024-11-20T19:26:22,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130842572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742127_1303 (size=12151) 2024-11-20T19:26:22,580 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130842573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,581 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130842575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,582 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/A of eff3b3afe40ac2d93c9d770f2a159636 into 52c173376ed44c41a1184d8526f7dc15(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:22,582 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:22,582 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/A, priority=13, startTime=1732130782503; duration=0sec 2024-11-20T19:26:22,582 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:22,582 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:A 2024-11-20T19:26:22,582 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:22,583 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/7685aee2a1344c5f94dd30fc6315a9f9 2024-11-20T19:26:22,584 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:22,584 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/C is initiating minor compaction (all files) 2024-11-20T19:26:22,584 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130842578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130842578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,585 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/C in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:22,585 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/dfb36e475de941a095b1c4232981491d, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/3e3e32b7c0d24f568863926738fbf811, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/4d1a17f064d6450495af135342cc4b4b] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=35.9 K 2024-11-20T19:26:22,588 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting dfb36e475de941a095b1c4232981491d, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1732130779465 2024-11-20T19:26:22,589 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e3e32b7c0d24f568863926738fbf811, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1732130780125 2024-11-20T19:26:22,589 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4d1a17f064d6450495af135342cc4b4b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732130781420 2024-11-20T19:26:22,608 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/859aa1d0425b47f282155ccac8361487 is 50, key is test_row_0/B:col10/1732130781917/Put/seqid=0 2024-11-20T19:26:22,621 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#C#compaction#250 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:22,621 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/37033dce86c7444fadef875616f567ae is 50, key is test_row_0/C:col10/1732130781908/Put/seqid=0 2024-11-20T19:26:22,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742128_1304 (size=12151) 2024-11-20T19:26:22,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742129_1305 (size=12595) 2024-11-20T19:26:22,679 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/37033dce86c7444fadef875616f567ae as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/37033dce86c7444fadef875616f567ae 2024-11-20T19:26:22,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130842679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130842682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,688 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/C of eff3b3afe40ac2d93c9d770f2a159636 into 37033dce86c7444fadef875616f567ae(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:22,688 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:22,688 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,688 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/C, priority=13, startTime=1732130782511; duration=0sec 2024-11-20T19:26:22,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130842683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,688 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:22,688 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:C 2024-11-20T19:26:22,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130842685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130842685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,719 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,720 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T19:26:22,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
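The repeated RegionTooBusyException entries above are the region server refusing writes while the region's memstore is over its blocking limit of 512.0 K; callers are expected to back off and retry once a flush has drained the memstore. Below is a minimal client-side sketch of such a retry loop using the standard HBase client API. It assumes an ordinary client setup; the retry budget and backoff values are illustrative, and recent HBase clients already perform similar retries internally.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;                 // illustrative starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                   // rejected while the region is over its blocking limit
          return;                           // write accepted
        } catch (RegionTooBusyException busy) {
          // Region is over its memstore blocking limit; wait for the flush to catch up.
          Thread.sleep(backoffMs);
          backoffMs *= 2;                   // simple exponential backoff
        }
      }
      throw new IOException("region still too busy after retries");
    }
  }
}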
2024-11-20T19:26:22,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:22,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:22,721 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:22,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
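The pid=78 failures above ("Unable to complete flush ... as already flushing") are the master-dispatched FlushRegionCallable colliding with the flush that MemStoreFlusher already has in progress; the master re-dispatches the procedure, which is why the same pid reappears in later entries. A flush can also be requested explicitly through the Admin API, which goes through the same procedure machinery. A minimal sketch, assuming a standard client connection; only the table name is taken from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ExplicitFlushExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table; if a region is already
      // flushing (as in the log above), the remote flush procedure is retried.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}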
2024-11-20T19:26:22,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:22,874 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,874 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T19:26:22,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:22,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
as already flushing 2024-11-20T19:26:22,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:22,874 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:22,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:22,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:22,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T19:26:22,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130842886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130842887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130842890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130842890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,892 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130842890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:22,971 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/6ebaa8ddcdcb49f7b8b2b877165a5485 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/6ebaa8ddcdcb49f7b8b2b877165a5485 2024-11-20T19:26:22,986 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/B of eff3b3afe40ac2d93c9d770f2a159636 into 6ebaa8ddcdcb49f7b8b2b877165a5485(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
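The compaction entries above show three ~12 K store files for families C and B each being rewritten into a single ~12.3 K file, with PressureAwareThroughputController capping the write rate (total limit 50.00 MB/second here). Compactions can also be requested explicitly via the Admin API; a minimal sketch, assuming a standard client connection, with the table and family names taken from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompactionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Queue a minor compaction of a single column family (asynchronous request).
      admin.compact(table, Bytes.toBytes("C"));
      // Or rewrite all store files of the table in one pass.
      admin.majorCompact(table);
    }
  }
}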
2024-11-20T19:26:22,986 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:22,986 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/B, priority=13, startTime=1732130782504; duration=0sec 2024-11-20T19:26:22,986 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:22,986 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:B 2024-11-20T19:26:23,026 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:23,027 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T19:26:23,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:23,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:23,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:23,027 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
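The 512.0 K figure quoted in every RegionTooBusyException above is the per-region blocking threshold, which HBase derives from the configured memstore flush size multiplied by a block multiplier; this test deliberately uses a tiny flush size to hit it quickly. A sketch of the two standard properties involved, set programmatically; the concrete values below are illustrative assumptions, not the ones used by this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Size at which a region's memstore is flushed to disk (illustrative value).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024 * 1024L);
    // Writes start failing with RegionTooBusyException once the memstore grows past
    // roughly flush.size * block.multiplier, as seen in the entries above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking threshold ~= "
        + conf.getLong("hbase.hregion.memstore.flush.size", 0)
          * conf.getInt("hbase.hregion.memstore.block.multiplier", 4) + " bytes");
  }
}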
2024-11-20T19:26:23,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:23,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:23,071 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/859aa1d0425b47f282155ccac8361487 2024-11-20T19:26:23,079 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/38b2e96b33644fc6a324004d6e1cfd26 is 50, key is test_row_0/C:col10/1732130781917/Put/seqid=0 2024-11-20T19:26:23,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742130_1306 (size=12151) 2024-11-20T19:26:23,115 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/38b2e96b33644fc6a324004d6e1cfd26 2024-11-20T19:26:23,119 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/7685aee2a1344c5f94dd30fc6315a9f9 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/7685aee2a1344c5f94dd30fc6315a9f9 2024-11-20T19:26:23,124 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/7685aee2a1344c5f94dd30fc6315a9f9, entries=150, sequenceid=204, filesize=11.9 K 2024-11-20T19:26:23,125 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/859aa1d0425b47f282155ccac8361487 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/859aa1d0425b47f282155ccac8361487 2024-11-20T19:26:23,129 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/859aa1d0425b47f282155ccac8361487, entries=150, sequenceid=204, filesize=11.9 K 2024-11-20T19:26:23,131 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/38b2e96b33644fc6a324004d6e1cfd26 as 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/38b2e96b33644fc6a324004d6e1cfd26 2024-11-20T19:26:23,136 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/38b2e96b33644fc6a324004d6e1cfd26, entries=150, sequenceid=204, filesize=11.9 K 2024-11-20T19:26:23,138 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for eff3b3afe40ac2d93c9d770f2a159636 in 598ms, sequenceid=204, compaction requested=false 2024-11-20T19:26:23,139 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:23,179 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:23,179 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T19:26:23,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:23,180 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T19:26:23,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A 2024-11-20T19:26:23,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:23,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B 2024-11-20T19:26:23,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:23,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=C 2024-11-20T19:26:23,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:23,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/56e2ab45e8ea4e45877e5dbc64097782 is 50, key is test_row_0/A:col10/1732130782574/Put/seqid=0 2024-11-20T19:26:23,190 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:23,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:23,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742131_1307 (size=12151) 2024-11-20T19:26:23,198 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/56e2ab45e8ea4e45877e5dbc64097782 2024-11-20T19:26:23,201 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130843197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:23,201 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130843197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:23,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130843200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:23,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130843201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:23,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130843201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:23,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/eaa4307fe1b4426ab069cc866d2767af is 50, key is test_row_0/B:col10/1732130782574/Put/seqid=0 2024-11-20T19:26:23,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742132_1308 (size=12151) 2024-11-20T19:26:23,231 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/eaa4307fe1b4426ab069cc866d2767af 2024-11-20T19:26:23,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/cc0376dc5ec745e3ada5707afdbd648d is 50, key is test_row_0/C:col10/1732130782574/Put/seqid=0 2024-11-20T19:26:23,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742133_1309 (size=12151) 2024-11-20T19:26:23,293 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/cc0376dc5ec745e3ada5707afdbd648d 2024-11-20T19:26:23,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/56e2ab45e8ea4e45877e5dbc64097782 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/56e2ab45e8ea4e45877e5dbc64097782 
2024-11-20T19:26:23,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130843302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:23,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130843302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:23,307 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130843306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:23,309 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,309 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/56e2ab45e8ea4e45877e5dbc64097782, entries=150, sequenceid=233, filesize=11.9 K 2024-11-20T19:26:23,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130843306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:23,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130843306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:23,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/eaa4307fe1b4426ab069cc866d2767af as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/eaa4307fe1b4426ab069cc866d2767af 2024-11-20T19:26:23,316 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/eaa4307fe1b4426ab069cc866d2767af, entries=150, sequenceid=233, filesize=11.9 K 2024-11-20T19:26:23,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/cc0376dc5ec745e3ada5707afdbd648d as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/cc0376dc5ec745e3ada5707afdbd648d 2024-11-20T19:26:23,328 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/cc0376dc5ec745e3ada5707afdbd648d, entries=150, sequenceid=233, filesize=11.9 K 2024-11-20T19:26:23,330 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for eff3b3afe40ac2d93c9d770f2a159636 in 149ms, sequenceid=233, compaction requested=true 2024-11-20T19:26:23,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:23,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing 
region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:23,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-11-20T19:26:23,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-11-20T19:26:23,333 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-11-20T19:26:23,333 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5470 sec 2024-11-20T19:26:23,335 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 1.5510 sec 2024-11-20T19:26:23,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:23,508 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T19:26:23,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A 2024-11-20T19:26:23,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:23,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B 2024-11-20T19:26:23,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:23,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=C 2024-11-20T19:26:23,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:23,514 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/97b5e86c201541218c77f7367267354b is 50, key is test_row_0/A:col10/1732130783507/Put/seqid=0 2024-11-20T19:26:23,535 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130843531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:23,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130843533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:23,538 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130843535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:23,538 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130843535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:23,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130843535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:23,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742134_1310 (size=12151) 2024-11-20T19:26:23,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130843636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:23,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130843639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:23,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130843639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:23,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130843640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:23,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130843640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:23,841 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130843840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:23,845 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130843842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:23,849 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130843848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:23,850 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130843848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:23,850 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130843848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:23,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T19:26:23,889 INFO [Thread-1206 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-11-20T19:26:23,890 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:23,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-11-20T19:26:23,892 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:23,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T19:26:23,893 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:23,893 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:23,949 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=245 
(bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/97b5e86c201541218c77f7367267354b 2024-11-20T19:26:23,980 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/f8b3f961510745849177b7cb8dd3f2c4 is 50, key is test_row_0/B:col10/1732130783507/Put/seqid=0 2024-11-20T19:26:23,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742135_1311 (size=12151) 2024-11-20T19:26:23,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T19:26:23,994 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/f8b3f961510745849177b7cb8dd3f2c4 2024-11-20T19:26:24,003 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/81ee5a704de24112a2a8ee8a42593956 is 50, key is test_row_0/C:col10/1732130783507/Put/seqid=0 2024-11-20T19:26:24,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742136_1312 (size=12151) 2024-11-20T19:26:24,034 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/81ee5a704de24112a2a8ee8a42593956 2024-11-20T19:26:24,042 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/97b5e86c201541218c77f7367267354b as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/97b5e86c201541218c77f7367267354b 2024-11-20T19:26:24,047 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,047 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T19:26:24,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:24,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
as already flushing 2024-11-20T19:26:24,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:24,048 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:24,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:24,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:24,053 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/97b5e86c201541218c77f7367267354b, entries=150, sequenceid=245, filesize=11.9 K 2024-11-20T19:26:24,055 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/f8b3f961510745849177b7cb8dd3f2c4 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/f8b3f961510745849177b7cb8dd3f2c4 2024-11-20T19:26:24,060 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/f8b3f961510745849177b7cb8dd3f2c4, entries=150, sequenceid=245, filesize=11.9 K 2024-11-20T19:26:24,062 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/81ee5a704de24112a2a8ee8a42593956 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/81ee5a704de24112a2a8ee8a42593956 2024-11-20T19:26:24,070 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/81ee5a704de24112a2a8ee8a42593956, entries=150, sequenceid=245, filesize=11.9 K 2024-11-20T19:26:24,071 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for eff3b3afe40ac2d93c9d770f2a159636 in 563ms, sequenceid=245, compaction requested=true 2024-11-20T19:26:24,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:24,071 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:24,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:24,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:24,072 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:24,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:24,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:24,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:24,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:24,073 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:24,073 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/B is initiating minor compaction (all files) 2024-11-20T19:26:24,073 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/B in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
2024-11-20T19:26:24,073 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/6ebaa8ddcdcb49f7b8b2b877165a5485, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/859aa1d0425b47f282155ccac8361487, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/eaa4307fe1b4426ab069cc866d2767af, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/f8b3f961510745849177b7cb8dd3f2c4] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=47.9 K 2024-11-20T19:26:24,074 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:24,074 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/A is initiating minor compaction (all files) 2024-11-20T19:26:24,074 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/A in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:24,074 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ebaa8ddcdcb49f7b8b2b877165a5485, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732130781420 2024-11-20T19:26:24,074 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/52c173376ed44c41a1184d8526f7dc15, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/7685aee2a1344c5f94dd30fc6315a9f9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/56e2ab45e8ea4e45877e5dbc64097782, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/97b5e86c201541218c77f7367267354b] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=47.9 K 2024-11-20T19:26:24,074 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52c173376ed44c41a1184d8526f7dc15, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732130781420 2024-11-20T19:26:24,075 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 859aa1d0425b47f282155ccac8361487, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=204, 
earliestPutTs=1732130781917 2024-11-20T19:26:24,075 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7685aee2a1344c5f94dd30fc6315a9f9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732130781917 2024-11-20T19:26:24,075 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting eaa4307fe1b4426ab069cc866d2767af, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732130782573 2024-11-20T19:26:24,075 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 56e2ab45e8ea4e45877e5dbc64097782, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732130782573 2024-11-20T19:26:24,076 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting f8b3f961510745849177b7cb8dd3f2c4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732130783199 2024-11-20T19:26:24,076 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 97b5e86c201541218c77f7367267354b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732130783199 2024-11-20T19:26:24,093 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#A#compaction#258 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:24,093 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/baced581151240849f3221eb90a6616c is 50, key is test_row_0/A:col10/1732130783507/Put/seqid=0 2024-11-20T19:26:24,102 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#B#compaction#259 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:24,103 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/771de5fdfa324f609ae3701dc3976c63 is 50, key is test_row_0/B:col10/1732130783507/Put/seqid=0 2024-11-20T19:26:24,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742137_1313 (size=12731) 2024-11-20T19:26:24,135 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/baced581151240849f3221eb90a6616c as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/baced581151240849f3221eb90a6616c 2024-11-20T19:26:24,140 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/A of eff3b3afe40ac2d93c9d770f2a159636 into baced581151240849f3221eb90a6616c(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:24,140 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:24,140 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/A, priority=12, startTime=1732130784071; duration=0sec 2024-11-20T19:26:24,140 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:24,140 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:A 2024-11-20T19:26:24,140 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:24,142 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:24,142 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/C is initiating minor compaction (all files) 2024-11-20T19:26:24,142 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/C in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
2024-11-20T19:26:24,142 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/37033dce86c7444fadef875616f567ae, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/38b2e96b33644fc6a324004d6e1cfd26, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/cc0376dc5ec745e3ada5707afdbd648d, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/81ee5a704de24112a2a8ee8a42593956] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=47.9 K 2024-11-20T19:26:24,143 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 37033dce86c7444fadef875616f567ae, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732130781420 2024-11-20T19:26:24,143 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38b2e96b33644fc6a324004d6e1cfd26, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732130781917 2024-11-20T19:26:24,144 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting cc0376dc5ec745e3ada5707afdbd648d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732130782573 2024-11-20T19:26:24,145 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 81ee5a704de24112a2a8ee8a42593956, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732130783199 2024-11-20T19:26:24,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742138_1314 (size=12731) 2024-11-20T19:26:24,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:24,151 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T19:26:24,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A 2024-11-20T19:26:24,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:24,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B 2024-11-20T19:26:24,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:24,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=C 2024-11-20T19:26:24,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:24,160 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/29e866384edb4be7b037100adec04d9f is 50, key is test_row_0/A:col10/1732130784149/Put/seqid=0 2024-11-20T19:26:24,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,165 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130844161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130844158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,165 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130844162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,169 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130844165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,169 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130844165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,189 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#C#compaction#261 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:24,189 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/8dd76d7c1206448d9872883fad0a5bb6 is 50, key is test_row_0/C:col10/1732130783507/Put/seqid=0 2024-11-20T19:26:24,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T19:26:24,201 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,201 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T19:26:24,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:24,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:24,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:24,202 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:24,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:24,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:24,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742139_1315 (size=12301) 2024-11-20T19:26:24,205 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/29e866384edb4be7b037100adec04d9f 2024-11-20T19:26:24,232 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/9f7de479b198491aa840257e569eb88d is 50, key is test_row_0/B:col10/1732130784149/Put/seqid=0 2024-11-20T19:26:24,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742140_1316 (size=12731) 2024-11-20T19:26:24,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742141_1317 (size=12301) 2024-11-20T19:26:24,262 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/9f7de479b198491aa840257e569eb88d 2024-11-20T19:26:24,269 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130844266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,270 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130844266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130844270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130844270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,276 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/e3db49004fe34b4e8117b4e227913655 is 50, key is test_row_0/C:col10/1732130784149/Put/seqid=0 2024-11-20T19:26:24,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130844266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742142_1318 (size=12301) 2024-11-20T19:26:24,302 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/e3db49004fe34b4e8117b4e227913655 2024-11-20T19:26:24,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/29e866384edb4be7b037100adec04d9f as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/29e866384edb4be7b037100adec04d9f 2024-11-20T19:26:24,312 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/29e866384edb4be7b037100adec04d9f, entries=150, sequenceid=272, filesize=12.0 K 2024-11-20T19:26:24,313 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/9f7de479b198491aa840257e569eb88d as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/9f7de479b198491aa840257e569eb88d 2024-11-20T19:26:24,318 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/9f7de479b198491aa840257e569eb88d, entries=150, sequenceid=272, filesize=12.0 K 2024-11-20T19:26:24,319 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/e3db49004fe34b4e8117b4e227913655 as 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/e3db49004fe34b4e8117b4e227913655 2024-11-20T19:26:24,325 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/e3db49004fe34b4e8117b4e227913655, entries=150, sequenceid=272, filesize=12.0 K 2024-11-20T19:26:24,326 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for eff3b3afe40ac2d93c9d770f2a159636 in 176ms, sequenceid=272, compaction requested=false 2024-11-20T19:26:24,326 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:24,354 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,354 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T19:26:24,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:24,354 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:26:24,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A 2024-11-20T19:26:24,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:24,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B 2024-11-20T19:26:24,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:24,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=C 2024-11-20T19:26:24,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:24,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/c4a0c5301759476aa6e356d0c145172d is 50, key is test_row_0/A:col10/1732130784164/Put/seqid=0 2024-11-20T19:26:24,383 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742143_1319 (size=12301) 2024-11-20T19:26:24,383 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=283 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/c4a0c5301759476aa6e356d0c145172d 2024-11-20T19:26:24,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/d62aec33984a4aa697d42547013b2f76 is 50, key is test_row_0/B:col10/1732130784164/Put/seqid=0 2024-11-20T19:26:24,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742144_1320 (size=12301) 2024-11-20T19:26:24,416 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=283 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/d62aec33984a4aa697d42547013b2f76 2024-11-20T19:26:24,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/d02d9655b94d45598f54fd0a2eb16d36 is 50, key is test_row_0/C:col10/1732130784164/Put/seqid=0 2024-11-20T19:26:24,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742145_1321 (size=12301) 2024-11-20T19:26:24,453 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=283 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/d02d9655b94d45598f54fd0a2eb16d36 2024-11-20T19:26:24,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/c4a0c5301759476aa6e356d0c145172d as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/c4a0c5301759476aa6e356d0c145172d 2024-11-20T19:26:24,472 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/c4a0c5301759476aa6e356d0c145172d, entries=150, sequenceid=283, filesize=12.0 K 
2024-11-20T19:26:24,474 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:24,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:24,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/d62aec33984a4aa697d42547013b2f76 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/d62aec33984a4aa697d42547013b2f76 2024-11-20T19:26:24,482 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/d62aec33984a4aa697d42547013b2f76, entries=150, sequenceid=283, filesize=12.0 K 2024-11-20T19:26:24,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/d02d9655b94d45598f54fd0a2eb16d36 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/d02d9655b94d45598f54fd0a2eb16d36 2024-11-20T19:26:24,491 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/d02d9655b94d45598f54fd0a2eb16d36, entries=150, sequenceid=283, filesize=12.0 K 2024-11-20T19:26:24,493 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=53.67 KB/54960 for eff3b3afe40ac2d93c9d770f2a159636 in 139ms, sequenceid=283, compaction requested=true 2024-11-20T19:26:24,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:24,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
2024-11-20T19:26:24,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-20T19:26:24,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-11-20T19:26:24,494 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T19:26:24,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A 2024-11-20T19:26:24,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:24,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B 2024-11-20T19:26:24,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:24,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=C 2024-11-20T19:26:24,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:24,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:24,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T19:26:24,497 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-11-20T19:26:24,497 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 602 msec 2024-11-20T19:26:24,500 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 608 msec 2024-11-20T19:26:24,502 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/6f0b7aca8bae444e97143e4784e7ccf9 is 50, key is test_row_0/A:col10/1732130784493/Put/seqid=0 2024-11-20T19:26:24,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742146_1322 (size=14741) 2024-11-20T19:26:24,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130844525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130844528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,536 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/6f0b7aca8bae444e97143e4784e7ccf9 2024-11-20T19:26:24,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130844530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,540 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130844531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130844533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,547 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/3b23928c3a8644bb93070803a69fa66a is 50, key is test_row_0/B:col10/1732130784493/Put/seqid=0 2024-11-20T19:26:24,555 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/771de5fdfa324f609ae3701dc3976c63 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/771de5fdfa324f609ae3701dc3976c63 2024-11-20T19:26:24,562 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/B of eff3b3afe40ac2d93c9d770f2a159636 into 771de5fdfa324f609ae3701dc3976c63(size=12.4 K), total size for store is 36.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
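The compaction above rewrote four B store files into a single 12.4 K file, bringing the store to 36.5 K. Compactions here are queued automatically by the region server after flushes, but they can also be requested and observed from a client. A small sketch using the Admin API; the table name is taken from the log, everything else is illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactionStateSketch {
        public static void main(String[] args) throws Exception {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection connection =
                     ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = connection.getAdmin()) {
                admin.compact(table);  // queue a minor compaction request for the table's regions
                // Poll the aggregate state: NONE, MINOR, MAJOR or MAJOR_AND_MINOR.
                CompactionState state = admin.getCompactionState(table);
                System.out.println("compaction state for " + table + ": " + state);
            }
        }
    }

Since the compactions in this log finish in well under a second, the polled state may already read NONE again by the time it is checked.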
2024-11-20T19:26:24,562 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:24,562 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/B, priority=12, startTime=1732130784072; duration=0sec 2024-11-20T19:26:24,562 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:24,562 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:B 2024-11-20T19:26:24,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742147_1323 (size=12301) 2024-11-20T19:26:24,581 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/3b23928c3a8644bb93070803a69fa66a 2024-11-20T19:26:24,598 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/ac6ca5ee7e0f4fb59436f6b981aea5f6 is 50, key is test_row_0/C:col10/1732130784493/Put/seqid=0 2024-11-20T19:26:24,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742148_1324 (size=12301) 2024-11-20T19:26:24,635 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130844633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130844635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,644 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,644 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130844641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130844641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130844641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,649 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/8dd76d7c1206448d9872883fad0a5bb6 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/8dd76d7c1206448d9872883fad0a5bb6 2024-11-20T19:26:24,656 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/C of eff3b3afe40ac2d93c9d770f2a159636 into 8dd76d7c1206448d9872883fad0a5bb6(size=12.4 K), total size for store is 36.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
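Every RegionTooBusyException above is HRegion.checkResources rejecting a write because the region's memstore has grown past its blocking limit, reported here as 512.0 K. That limit is normally the configured memstore flush size multiplied by the block multiplier, so the small value suggests this test runs with a deliberately tiny flush size to force constant flush and compaction activity. A hedged server-side configuration sketch; the values are illustrative defaults, not what this test uses:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreBlockingLimit {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Flush a region's memstore once it reaches this many bytes (128 MB here).
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
            // New updates are blocked once the memstore reaches multiplier * flush size;
            // that product is the "Over memstore limit" value reported in the log above.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
            System.out.println("updates blocked above roughly " + blockingLimit + " bytes");
        }
    }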
2024-11-20T19:26:24,656 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:24,656 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/C, priority=12, startTime=1732130784073; duration=0sec 2024-11-20T19:26:24,656 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:24,656 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:C 2024-11-20T19:26:24,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130844838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130844840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,848 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130844846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,848 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130844847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,848 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130844847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:24,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T19:26:24,997 INFO [Thread-1206 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-11-20T19:26:24,998 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:24,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-11-20T19:26:24,999 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:24,999 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:25,000 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:25,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T19:26:25,013 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/ac6ca5ee7e0f4fb59436f6b981aea5f6 2024-11-20T19:26:25,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/6f0b7aca8bae444e97143e4784e7ccf9 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/6f0b7aca8bae444e97143e4784e7ccf9 2024-11-20T19:26:25,028 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/6f0b7aca8bae444e97143e4784e7ccf9, entries=200, sequenceid=295, filesize=14.4 K 2024-11-20T19:26:25,030 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/3b23928c3a8644bb93070803a69fa66a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/3b23928c3a8644bb93070803a69fa66a 2024-11-20T19:26:25,035 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/3b23928c3a8644bb93070803a69fa66a, entries=150, sequenceid=295, filesize=12.0 K 2024-11-20T19:26:25,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/ac6ca5ee7e0f4fb59436f6b981aea5f6 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/ac6ca5ee7e0f4fb59436f6b981aea5f6 2024-11-20T19:26:25,043 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/ac6ca5ee7e0f4fb59436f6b981aea5f6, entries=150, sequenceid=295, filesize=12.0 K 2024-11-20T19:26:25,044 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for eff3b3afe40ac2d93c9d770f2a159636 in 550ms, sequenceid=295, compaction requested=true 2024-11-20T19:26:25,044 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:25,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:25,044 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:25,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:25,044 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:25,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:25,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 
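While the memstore stays above the blocking limit, writers keep receiving these rejections until the flushes above drain the region. The HBase client normally retries such failures internally; code that wants explicit back-off can do so by hand. A sketch under that assumption (note that, depending on client retry settings, the exception may surface wrapped in a retries-exhausted exception rather than directly):

    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public final class BusyRegionRetry {
        // Retries a single Put with exponential back-off when the region rejects it
        // because its memstore is over the blocking limit seen in the log above.
        static void putWithBackoff(Connection connection, Put put)
                throws IOException, InterruptedException {
            try (Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                long sleepMs = 100;
                for (int attempt = 0; attempt < 5; attempt++) {
                    try {
                        table.put(put);
                        return;
                    } catch (RegionTooBusyException e) {
                        Thread.sleep(sleepMs);  // give the flusher and compactor time to catch up
                        sleepMs *= 2;
                    }
                }
                throw new IOException("region still too busy after retries");
            }
        }
    }

A mutation shaped like the test's rows would be built as new Put(Bytes.toBytes("test_row_0")).addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value), matching the A:col10 keys visible in the flush output above.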
2024-11-20T19:26:25,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:25,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:25,047 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52074 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:25,047 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/A is initiating minor compaction (all files) 2024-11-20T19:26:25,047 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/A in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:25,047 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/baced581151240849f3221eb90a6616c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/29e866384edb4be7b037100adec04d9f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/c4a0c5301759476aa6e356d0c145172d, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/6f0b7aca8bae444e97143e4784e7ccf9] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=50.9 K 2024-11-20T19:26:25,048 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:25,048 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/B is initiating minor compaction (all files) 2024-11-20T19:26:25,048 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/B in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
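The "Exploring compaction algorithm has selected 4 files ... with 3 in ratio" entries reflect the ratio rule used when scoring candidate selections: each file in the chosen set should be no larger than the combined size of the other files times the configured compaction ratio. A simplified, illustrative restatement of that check, not HBase's actual implementation:

    import java.util.List;

    public final class RatioCheckSketch {
        // Returns true if every file in the candidate selection is at most
        // `ratio` times the combined size of the other files in the selection;
        // a simplified form of the "in ratio" test mentioned in the log.
        static boolean filesInRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Illustrative sizes roughly like the ~12 K store files above; 1.2 is a common ratio.
            System.out.println(filesInRatio(List.of(12_742L, 12_301L, 12_301L, 12_301L), 1.2));
        }
    }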
2024-11-20T19:26:25,048 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/771de5fdfa324f609ae3701dc3976c63, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/9f7de479b198491aa840257e569eb88d, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/d62aec33984a4aa697d42547013b2f76, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/3b23928c3a8644bb93070803a69fa66a] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=48.5 K 2024-11-20T19:26:25,048 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 771de5fdfa324f609ae3701dc3976c63, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732130783199 2024-11-20T19:26:25,048 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting baced581151240849f3221eb90a6616c, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732130783199 2024-11-20T19:26:25,049 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f7de479b198491aa840257e569eb88d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1732130783532 2024-11-20T19:26:25,049 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29e866384edb4be7b037100adec04d9f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1732130783532 2024-11-20T19:26:25,050 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting d62aec33984a4aa697d42547013b2f76, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=283, earliestPutTs=1732130784157 2024-11-20T19:26:25,050 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4a0c5301759476aa6e356d0c145172d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=283, earliestPutTs=1732130784157 2024-11-20T19:26:25,050 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b23928c3a8644bb93070803a69fa66a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732130784487 2024-11-20T19:26:25,051 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6f0b7aca8bae444e97143e4784e7ccf9, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732130784480 2024-11-20T19:26:25,068 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#B#compaction#270 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:25,068 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#A#compaction#271 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:25,068 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/521ce036d4504199b5fccc3484616015 is 50, key is test_row_0/B:col10/1732130784493/Put/seqid=0 2024-11-20T19:26:25,068 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/7ef4084b821c42ea9a3bd1955fdbb1d2 is 50, key is test_row_0/A:col10/1732130784493/Put/seqid=0 2024-11-20T19:26:25,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742150_1326 (size=13017) 2024-11-20T19:26:25,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T19:26:25,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742149_1325 (size=13017) 2024-11-20T19:26:25,105 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/7ef4084b821c42ea9a3bd1955fdbb1d2 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/7ef4084b821c42ea9a3bd1955fdbb1d2 2024-11-20T19:26:25,112 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/521ce036d4504199b5fccc3484616015 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/521ce036d4504199b5fccc3484616015 2024-11-20T19:26:25,118 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/A of eff3b3afe40ac2d93c9d770f2a159636 into 7ef4084b821c42ea9a3bd1955fdbb1d2(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
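The PressureAwareThroughputController lines account compaction I/O against a throughput cap, 50.00 MB/second in this run, and would make the compactor sleep if it wrote faster than that. The cap is configurable on the region server; in the sketch below the property keys are my assumption about the names used by recent HBase releases rather than something this log states, so they should be verified against the running version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Bounds between which the controller scales allowed compaction throughput
            // according to memstore pressure (values illustrative, keys assumed).
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        }
    }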
2024-11-20T19:26:25,118 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:25,118 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/A, priority=12, startTime=1732130785044; duration=0sec 2024-11-20T19:26:25,118 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:25,118 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:A 2024-11-20T19:26:25,118 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:25,119 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/B of eff3b3afe40ac2d93c9d770f2a159636 into 521ce036d4504199b5fccc3484616015(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:25,119 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:25,119 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/B, priority=12, startTime=1732130785044; duration=0sec 2024-11-20T19:26:25,119 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:25,119 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:B 2024-11-20T19:26:25,135 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:25,135 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/C is initiating minor compaction (all files) 2024-11-20T19:26:25,135 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/C in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
2024-11-20T19:26:25,135 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/8dd76d7c1206448d9872883fad0a5bb6, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/e3db49004fe34b4e8117b4e227913655, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/d02d9655b94d45598f54fd0a2eb16d36, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/ac6ca5ee7e0f4fb59436f6b981aea5f6] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=48.5 K 2024-11-20T19:26:25,136 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8dd76d7c1206448d9872883fad0a5bb6, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732130783199 2024-11-20T19:26:25,137 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting e3db49004fe34b4e8117b4e227913655, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1732130783532 2024-11-20T19:26:25,137 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting d02d9655b94d45598f54fd0a2eb16d36, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=283, earliestPutTs=1732130784157 2024-11-20T19:26:25,137 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac6ca5ee7e0f4fb59436f6b981aea5f6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732130784487 2024-11-20T19:26:25,150 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:26:25,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A 2024-11-20T19:26:25,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:25,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B 2024-11-20T19:26:25,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:25,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=C 2024-11-20T19:26:25,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:25,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:25,151 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:25,152 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T19:26:25,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:25,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:25,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:25,152 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:25,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
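The pid=82 failure is the region server declining the FlushRegionCallable because the region is already mid-flush; the IOException travels back to the master, which owns the procedure's retry and failure handling, as the next entries show. A single region can also be flushed directly through the Admin API rather than per table; a minimal sketch, where the region lookup is illustrative and assumes the one-region layout of this test table:

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class FlushSingleRegionSketch {
        public static void main(String[] args) throws Exception {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection connection =
                     ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = connection.getAdmin();
                 RegionLocator locator = connection.getRegionLocator(table)) {
                List<HRegionLocation> locations = locator.getAllRegionLocations();
                for (HRegionLocation location : locations) {
                    // flushRegion accepts the region name bytes of the region to flush.
                    admin.flushRegion(location.getRegion().getRegionName());
                }
            }
        }
    }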
2024-11-20T19:26:25,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:25,161 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#C#compaction#272 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:25,162 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/f4857abdd58842dc82a26e88c6e82128 is 50, key is test_row_0/C:col10/1732130784493/Put/seqid=0 2024-11-20T19:26:25,165 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130845159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:25,166 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130845159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:25,166 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/88fbd025a3804fb880098f23e3fc5401 is 50, key is test_row_0/A:col10/1732130784523/Put/seqid=0 2024-11-20T19:26:25,166 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130845160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:25,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130845164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:25,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130845165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:25,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742151_1327 (size=13017) 2024-11-20T19:26:25,234 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/f4857abdd58842dc82a26e88c6e82128 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/f4857abdd58842dc82a26e88c6e82128 2024-11-20T19:26:25,250 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/C of eff3b3afe40ac2d93c9d770f2a159636 into f4857abdd58842dc82a26e88c6e82128(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:25,250 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:25,250 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/C, priority=12, startTime=1732130785044; duration=0sec 2024-11-20T19:26:25,250 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:25,250 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:C 2024-11-20T19:26:25,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742152_1328 (size=14741) 2024-11-20T19:26:25,253 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/88fbd025a3804fb880098f23e3fc5401 2024-11-20T19:26:25,263 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/f8b76100eb04457b8b2d0cec14a2b754 is 50, key is test_row_0/B:col10/1732130784523/Put/seqid=0 2024-11-20T19:26:25,270 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130845267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:25,270 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130845269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:25,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130845275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:25,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130845275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:25,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130845268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:25,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742153_1329 (size=12301) 2024-11-20T19:26:25,291 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/f8b76100eb04457b8b2d0cec14a2b754 2024-11-20T19:26:25,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T19:26:25,305 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:25,306 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T19:26:25,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:25,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
as already flushing 2024-11-20T19:26:25,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:25,306 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:25,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:25,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:25,314 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/d1d76191638c4b0a9119265468259da2 is 50, key is test_row_0/C:col10/1732130784523/Put/seqid=0 2024-11-20T19:26:25,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742154_1330 (size=12301) 2024-11-20T19:26:25,356 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/d1d76191638c4b0a9119265468259da2 2024-11-20T19:26:25,362 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/88fbd025a3804fb880098f23e3fc5401 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/88fbd025a3804fb880098f23e3fc5401 2024-11-20T19:26:25,366 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/88fbd025a3804fb880098f23e3fc5401, entries=200, sequenceid=324, filesize=14.4 K 2024-11-20T19:26:25,368 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/f8b76100eb04457b8b2d0cec14a2b754 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/f8b76100eb04457b8b2d0cec14a2b754 2024-11-20T19:26:25,373 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/f8b76100eb04457b8b2d0cec14a2b754, entries=150, sequenceid=324, filesize=12.0 K 2024-11-20T19:26:25,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/d1d76191638c4b0a9119265468259da2 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/d1d76191638c4b0a9119265468259da2 2024-11-20T19:26:25,378 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/d1d76191638c4b0a9119265468259da2, entries=150, sequenceid=324, filesize=12.0 K 2024-11-20T19:26:25,379 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for eff3b3afe40ac2d93c9d770f2a159636 in 229ms, sequenceid=324, compaction requested=false 2024-11-20T19:26:25,379 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:25,459 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:25,459 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T19:26:25,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
2024-11-20T19:26:25,460 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:26:25,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A 2024-11-20T19:26:25,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:25,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B 2024-11-20T19:26:25,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:25,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=C 2024-11-20T19:26:25,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:25,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/597459d456554254b72f60a92d8ae5c9 is 50, key is test_row_0/A:col10/1732130785164/Put/seqid=0 2024-11-20T19:26:25,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:25,474 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:25,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742155_1331 (size=12301) 2024-11-20T19:26:25,491 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/597459d456554254b72f60a92d8ae5c9 2024-11-20T19:26:25,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/9d73303dbb604082ada96dad2af345a5 is 50, key is test_row_0/B:col10/1732130785164/Put/seqid=0 2024-11-20T19:26:25,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130845499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:25,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130845500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:25,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130845502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:25,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130845503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:25,508 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130845504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:25,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742156_1332 (size=12301) 2024-11-20T19:26:25,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T19:26:25,605 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130845605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:25,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130845605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:25,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130845608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:25,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130845609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:25,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:25,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130845609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:25,810 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:25,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:25,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130845808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:25,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130845808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:25,815 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:25,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130845814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:25,818 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:25,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130845814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:25,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:25,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130845815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:25,925 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/9d73303dbb604082ada96dad2af345a5
2024-11-20T19:26:25,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/7b9edf707cc34aba868c888c821c1d8c is 50, key is test_row_0/C:col10/1732130785164/Put/seqid=0
2024-11-20T19:26:25,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742157_1333 (size=12301)
2024-11-20T19:26:26,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81
2024-11-20T19:26:26,114 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:26,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130846111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:26,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:26,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130846114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:26,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:26,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130846118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:26,123 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:26,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130846120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:26,123 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:26,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130846120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:26,397 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/7b9edf707cc34aba868c888c821c1d8c
2024-11-20T19:26:26,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/597459d456554254b72f60a92d8ae5c9 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/597459d456554254b72f60a92d8ae5c9
2024-11-20T19:26:26,412 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/597459d456554254b72f60a92d8ae5c9, entries=150, sequenceid=336, filesize=12.0 K
2024-11-20T19:26:26,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/9d73303dbb604082ada96dad2af345a5 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/9d73303dbb604082ada96dad2af345a5
2024-11-20T19:26:26,420 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/9d73303dbb604082ada96dad2af345a5, entries=150, sequenceid=336, filesize=12.0 K
2024-11-20T19:26:26,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/7b9edf707cc34aba868c888c821c1d8c as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/7b9edf707cc34aba868c888c821c1d8c
2024-11-20T19:26:26,427 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/7b9edf707cc34aba868c888c821c1d8c, entries=150, sequenceid=336, filesize=12.0 K
2024-11-20T19:26:26,428 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for eff3b3afe40ac2d93c9d770f2a159636 in 968ms, sequenceid=336, compaction requested=true
2024-11-20T19:26:26,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for eff3b3afe40ac2d93c9d770f2a159636:
2024-11-20T19:26:26,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.
2024-11-20T19:26:26,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82
2024-11-20T19:26:26,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=82
2024-11-20T19:26:26,433 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81
2024-11-20T19:26:26,433 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4300 sec
2024-11-20T19:26:26,440 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 1.4350 sec
2024-11-20T19:26:26,622 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB
2024-11-20T19:26:26,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A
2024-11-20T19:26:26,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:26:26,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B
2024-11-20T19:26:26,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:26:26,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=C
2024-11-20T19:26:26,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:26:26,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on eff3b3afe40ac2d93c9d770f2a159636
2024-11-20T19:26:26,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:26,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130846629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:26,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:26,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130846629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:26,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:26,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130846629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:26,636 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:26,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130846633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:26,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:26,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130846633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:26,640 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/34557173f3544d3ea9a00317608c3464 is 50, key is test_row_0/A:col10/1732130786621/Put/seqid=0
2024-11-20T19:26:26,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742158_1334 (size=14741)
2024-11-20T19:26:26,737 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:26,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130846735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:26,737 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:26,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130846735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:26,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:26,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130846735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:26,739 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:26,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130846737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:26,739 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:26,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130846738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:26,941 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:26,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130846939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:26,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:26,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130846939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:26,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:26,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130846939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:26,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:26,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130846940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:26,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:26,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130846941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:27,092 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=362 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/34557173f3544d3ea9a00317608c3464
2024-11-20T19:26:27,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81
2024-11-20T19:26:27,104 INFO [Thread-1206 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed
2024-11-20T19:26:27,105 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-20T19:26:27,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees
2024-11-20T19:26:27,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83
2024-11-20T19:26:27,119 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-20T19:26:27,119 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T19:26:27,119 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:27,132 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/a6af1d0f34b94864bea4aeeeac5dd2ab is 50, key is test_row_0/B:col10/1732130786621/Put/seqid=0 2024-11-20T19:26:27,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742159_1335 (size=12301) 2024-11-20T19:26:27,172 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=362 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/a6af1d0f34b94864bea4aeeeac5dd2ab 2024-11-20T19:26:27,179 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/b6152c2e714f4217a11dd4ca88c6e35b is 50, key is test_row_0/C:col10/1732130786621/Put/seqid=0 2024-11-20T19:26:27,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742160_1336 (size=12301) 2024-11-20T19:26:27,194 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=362 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/b6152c2e714f4217a11dd4ca88c6e35b 2024-11-20T19:26:27,199 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/34557173f3544d3ea9a00317608c3464 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/34557173f3544d3ea9a00317608c3464 2024-11-20T19:26:27,212 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/34557173f3544d3ea9a00317608c3464, entries=200, sequenceid=362, filesize=14.4 K 2024-11-20T19:26:27,213 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/a6af1d0f34b94864bea4aeeeac5dd2ab as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/a6af1d0f34b94864bea4aeeeac5dd2ab 2024-11-20T19:26:27,217 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/a6af1d0f34b94864bea4aeeeac5dd2ab, 
entries=150, sequenceid=362, filesize=12.0 K 2024-11-20T19:26:27,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T19:26:27,218 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/b6152c2e714f4217a11dd4ca88c6e35b as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/b6152c2e714f4217a11dd4ca88c6e35b 2024-11-20T19:26:27,222 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/b6152c2e714f4217a11dd4ca88c6e35b, entries=150, sequenceid=362, filesize=12.0 K 2024-11-20T19:26:27,223 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for eff3b3afe40ac2d93c9d770f2a159636 in 601ms, sequenceid=362, compaction requested=true 2024-11-20T19:26:27,223 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:27,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:27,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:27,223 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:27,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:27,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:27,223 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:27,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:27,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:27,224 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54800 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:27,224 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/A is initiating minor compaction (all files) 
2024-11-20T19:26:27,225 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/A in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:27,225 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/7ef4084b821c42ea9a3bd1955fdbb1d2, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/88fbd025a3804fb880098f23e3fc5401, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/597459d456554254b72f60a92d8ae5c9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/34557173f3544d3ea9a00317608c3464] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=53.5 K 2024-11-20T19:26:27,226 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ef4084b821c42ea9a3bd1955fdbb1d2, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732130784487 2024-11-20T19:26:27,227 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49920 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:27,227 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 88fbd025a3804fb880098f23e3fc5401, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1732130784523 2024-11-20T19:26:27,227 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/B is initiating minor compaction (all files) 2024-11-20T19:26:27,227 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/B in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
2024-11-20T19:26:27,227 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/521ce036d4504199b5fccc3484616015, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/f8b76100eb04457b8b2d0cec14a2b754, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/9d73303dbb604082ada96dad2af345a5, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/a6af1d0f34b94864bea4aeeeac5dd2ab] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=48.8 K 2024-11-20T19:26:27,227 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 521ce036d4504199b5fccc3484616015, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732130784487 2024-11-20T19:26:27,228 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 597459d456554254b72f60a92d8ae5c9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1732130785159 2024-11-20T19:26:27,229 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting f8b76100eb04457b8b2d0cec14a2b754, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1732130784523 2024-11-20T19:26:27,229 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 34557173f3544d3ea9a00317608c3464, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=362, earliestPutTs=1732130785495 2024-11-20T19:26:27,231 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9d73303dbb604082ada96dad2af345a5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1732130785159 2024-11-20T19:26:27,232 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting a6af1d0f34b94864bea4aeeeac5dd2ab, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=362, earliestPutTs=1732130785495 2024-11-20T19:26:27,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:27,248 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T19:26:27,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A 2024-11-20T19:26:27,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:27,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B 2024-11-20T19:26:27,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; 
before=1, new segment=null 2024-11-20T19:26:27,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=C 2024-11-20T19:26:27,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:27,257 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#A#compaction#282 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:27,258 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/123e81bcc66249f2bf82394d0413b22b is 50, key is test_row_0/A:col10/1732130786621/Put/seqid=0 2024-11-20T19:26:27,271 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#B#compaction#283 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:27,272 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/a4031f66674446b5b8dc5241f0e71007 is 50, key is test_row_0/B:col10/1732130786621/Put/seqid=0 2024-11-20T19:26:27,272 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:27,272 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T19:26:27,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:27,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:27,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:27,273 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:27,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:27,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:27,276 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/485ee5ef95a249baa6a82ed66571672c is 50, key is test_row_0/A:col10/1732130786631/Put/seqid=0 2024-11-20T19:26:27,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130847278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:27,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130847278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:27,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130847279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:27,284 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130847280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:27,284 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130847281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:27,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742161_1337 (size=13153) 2024-11-20T19:26:27,303 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/123e81bcc66249f2bf82394d0413b22b as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/123e81bcc66249f2bf82394d0413b22b 2024-11-20T19:26:27,309 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/A of eff3b3afe40ac2d93c9d770f2a159636 into 123e81bcc66249f2bf82394d0413b22b(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:27,309 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:27,309 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/A, priority=12, startTime=1732130787223; duration=0sec 2024-11-20T19:26:27,309 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:27,309 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:A 2024-11-20T19:26:27,310 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:27,311 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49920 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:27,311 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/C is initiating minor compaction (all files) 2024-11-20T19:26:27,311 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/C in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:27,311 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/f4857abdd58842dc82a26e88c6e82128, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/d1d76191638c4b0a9119265468259da2, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/7b9edf707cc34aba868c888c821c1d8c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/b6152c2e714f4217a11dd4ca88c6e35b] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=48.8 K 2024-11-20T19:26:27,312 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting f4857abdd58842dc82a26e88c6e82128, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732130784487 2024-11-20T19:26:27,312 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting d1d76191638c4b0a9119265468259da2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1732130784523 2024-11-20T19:26:27,313 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b9edf707cc34aba868c888c821c1d8c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=336, earliestPutTs=1732130785159 2024-11-20T19:26:27,313 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting b6152c2e714f4217a11dd4ca88c6e35b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=362, earliestPutTs=1732130785495 2024-11-20T19:26:27,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742162_1338 (size=13153) 2024-11-20T19:26:27,329 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#C#compaction#285 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:27,329 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/816bd082a742439991ad2b942da80daf is 50, key is test_row_0/C:col10/1732130786621/Put/seqid=0 2024-11-20T19:26:27,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742163_1339 (size=14741) 2024-11-20T19:26:27,340 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/485ee5ef95a249baa6a82ed66571672c 2024-11-20T19:26:27,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742164_1340 (size=13153) 2024-11-20T19:26:27,361 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/036e360aa7984e0488aec650789900e1 is 50, key is test_row_0/B:col10/1732130786631/Put/seqid=0 2024-11-20T19:26:27,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130847383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:27,390 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,390 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130847384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:27,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130847385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:27,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130847384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:27,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130847386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:27,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742165_1341 (size=12301) 2024-11-20T19:26:27,404 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/036e360aa7984e0488aec650789900e1 2024-11-20T19:26:27,417 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/22c04aceecd54787b7caa0b96b01636b is 50, key is test_row_0/C:col10/1732130786631/Put/seqid=0 2024-11-20T19:26:27,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T19:26:27,425 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:27,426 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T19:26:27,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
2024-11-20T19:26:27,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:27,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:27,426 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:27,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:27,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:27,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742166_1342 (size=12301) 2024-11-20T19:26:27,599 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:27,603 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T19:26:27,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
2024-11-20T19:26:27,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:27,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:27,604 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:27,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:27,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:27,605 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130847603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:27,607 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130847604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:27,607 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130847604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:27,607 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130847604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:27,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130847604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:27,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T19:26:27,731 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/a4031f66674446b5b8dc5241f0e71007 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/a4031f66674446b5b8dc5241f0e71007 2024-11-20T19:26:27,737 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/B of eff3b3afe40ac2d93c9d770f2a159636 into a4031f66674446b5b8dc5241f0e71007(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:27,737 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:27,737 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/B, priority=12, startTime=1732130787223; duration=0sec 2024-11-20T19:26:27,737 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:27,737 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:B 2024-11-20T19:26:27,748 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/816bd082a742439991ad2b942da80daf as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/816bd082a742439991ad2b942da80daf 2024-11-20T19:26:27,755 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:27,756 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/C of eff3b3afe40ac2d93c9d770f2a159636 into 816bd082a742439991ad2b942da80daf(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:27,756 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:27,756 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/C, priority=12, startTime=1732130787223; duration=0sec 2024-11-20T19:26:27,756 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:27,756 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:C 2024-11-20T19:26:27,756 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T19:26:27,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:27,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
as already flushing 2024-11-20T19:26:27,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:27,757 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:27,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:27,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:27,874 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/22c04aceecd54787b7caa0b96b01636b 2024-11-20T19:26:27,881 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/485ee5ef95a249baa6a82ed66571672c as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/485ee5ef95a249baa6a82ed66571672c 2024-11-20T19:26:27,886 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/485ee5ef95a249baa6a82ed66571672c, entries=200, sequenceid=375, filesize=14.4 K 2024-11-20T19:26:27,886 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/036e360aa7984e0488aec650789900e1 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/036e360aa7984e0488aec650789900e1 2024-11-20T19:26:27,890 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/036e360aa7984e0488aec650789900e1, entries=150, sequenceid=375, filesize=12.0 K 2024-11-20T19:26:27,892 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/22c04aceecd54787b7caa0b96b01636b as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/22c04aceecd54787b7caa0b96b01636b 2024-11-20T19:26:27,908 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:27,908 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/22c04aceecd54787b7caa0b96b01636b, entries=150, sequenceid=375, filesize=12.0 K 2024-11-20T19:26:27,909 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T19:26:27,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:27,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:27,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:27,909 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:27,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:27,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:27,911 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for eff3b3afe40ac2d93c9d770f2a159636 in 663ms, sequenceid=375, compaction requested=false 2024-11-20T19:26:27,911 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:27,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:27,914 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T19:26:27,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A 2024-11-20T19:26:27,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:27,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B 2024-11-20T19:26:27,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:27,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=C 2024-11-20T19:26:27,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:27,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130847918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:27,924 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/f0d161deffdf447894d8b687c46810e7 is 50, key is test_row_0/A:col10/1732130787279/Put/seqid=0 2024-11-20T19:26:27,924 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130847919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:27,926 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130847921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:27,926 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130847921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:27,927 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130847922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:27,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742167_1343 (size=12301) 2024-11-20T19:26:27,954 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=406 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/f0d161deffdf447894d8b687c46810e7 2024-11-20T19:26:27,967 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/34aa5c1556ac441792448d6c8a2ca30b is 50, key is test_row_0/B:col10/1732130787279/Put/seqid=0 2024-11-20T19:26:28,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742168_1344 (size=12301) 2024-11-20T19:26:28,028 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130848025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:28,029 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130848026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:28,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130848028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:28,032 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130848028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:28,032 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130848028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:28,061 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:28,062 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T19:26:28,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:28,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:28,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:28,062 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:28,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:28,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:28,213 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:28,214 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T19:26:28,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:28,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:28,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:28,214 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:28,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:28,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:28,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T19:26:28,232 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130848230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:28,233 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130848231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:28,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130848232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:28,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130848234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:28,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130848234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:28,368 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:28,369 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T19:26:28,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:28,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:28,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:28,369 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:28,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:28,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:28,401 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=406 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/34aa5c1556ac441792448d6c8a2ca30b 2024-11-20T19:26:28,432 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/22112d663b4545e1b7d3c4ad068d8cca is 50, key is test_row_0/C:col10/1732130787279/Put/seqid=0 2024-11-20T19:26:28,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742169_1345 (size=12301) 2024-11-20T19:26:28,521 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:28,522 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T19:26:28,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:28,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
as already flushing 2024-11-20T19:26:28,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:28,522 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:28,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:28,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:28,536 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130848534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:28,536 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130848535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:28,538 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130848537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:28,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130848539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:28,542 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130848539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:28,674 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:28,674 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T19:26:28,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:28,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:28,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:28,675 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:28,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:28,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:28,826 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:28,827 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T19:26:28,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:28,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:28,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:28,828 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:28,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:28,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:28,882 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=406 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/22112d663b4545e1b7d3c4ad068d8cca 2024-11-20T19:26:28,886 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/f0d161deffdf447894d8b687c46810e7 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/f0d161deffdf447894d8b687c46810e7 2024-11-20T19:26:28,895 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/f0d161deffdf447894d8b687c46810e7, entries=150, sequenceid=406, filesize=12.0 K 2024-11-20T19:26:28,897 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/34aa5c1556ac441792448d6c8a2ca30b as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/34aa5c1556ac441792448d6c8a2ca30b 2024-11-20T19:26:28,905 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/34aa5c1556ac441792448d6c8a2ca30b, entries=150, 
sequenceid=406, filesize=12.0 K 2024-11-20T19:26:28,919 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/22112d663b4545e1b7d3c4ad068d8cca as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/22112d663b4545e1b7d3c4ad068d8cca 2024-11-20T19:26:28,925 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/22112d663b4545e1b7d3c4ad068d8cca, entries=150, sequenceid=406, filesize=12.0 K 2024-11-20T19:26:28,926 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for eff3b3afe40ac2d93c9d770f2a159636 in 1012ms, sequenceid=406, compaction requested=true 2024-11-20T19:26:28,926 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:28,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:28,926 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:28,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:28,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:28,926 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:28,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:28,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:28,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:28,927 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40195 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:28,927 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/A is initiating minor compaction (all files) 2024-11-20T19:26:28,927 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/A in 
TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:28,927 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/123e81bcc66249f2bf82394d0413b22b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/485ee5ef95a249baa6a82ed66571672c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/f0d161deffdf447894d8b687c46810e7] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=39.3 K 2024-11-20T19:26:28,927 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:28,927 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/B is initiating minor compaction (all files) 2024-11-20T19:26:28,928 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/B in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:28,928 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/a4031f66674446b5b8dc5241f0e71007, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/036e360aa7984e0488aec650789900e1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/34aa5c1556ac441792448d6c8a2ca30b] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=36.9 K 2024-11-20T19:26:28,928 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 123e81bcc66249f2bf82394d0413b22b, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=362, earliestPutTs=1732130785495 2024-11-20T19:26:28,928 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting a4031f66674446b5b8dc5241f0e71007, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=362, earliestPutTs=1732130785495 2024-11-20T19:26:28,929 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 485ee5ef95a249baa6a82ed66571672c, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1732130786631 2024-11-20T19:26:28,929 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 036e360aa7984e0488aec650789900e1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1732130786631 2024-11-20T19:26:28,929 
DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting f0d161deffdf447894d8b687c46810e7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=406, earliestPutTs=1732130787279 2024-11-20T19:26:28,930 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 34aa5c1556ac441792448d6c8a2ca30b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=406, earliestPutTs=1732130787279 2024-11-20T19:26:28,941 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#B#compaction#291 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:28,941 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/95bf9171e25a4633b81c5856bc315787 is 50, key is test_row_0/B:col10/1732130787279/Put/seqid=0 2024-11-20T19:26:28,961 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#A#compaction#292 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:28,961 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/083a92b137784b3b864184e5039435cc is 50, key is test_row_0/A:col10/1732130787279/Put/seqid=0 2024-11-20T19:26:28,980 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:28,981 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T19:26:28,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
2024-11-20T19:26:28,981 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T19:26:28,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A 2024-11-20T19:26:28,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:28,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B 2024-11-20T19:26:28,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:28,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=C 2024-11-20T19:26:28,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:29,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742170_1346 (size=13255) 2024-11-20T19:26:29,012 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/95bf9171e25a4633b81c5856bc315787 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/95bf9171e25a4633b81c5856bc315787 2024-11-20T19:26:29,020 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/B of eff3b3afe40ac2d93c9d770f2a159636 into 95bf9171e25a4633b81c5856bc315787(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:29,020 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:29,020 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/B, priority=13, startTime=1732130788926; duration=0sec 2024-11-20T19:26:29,020 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:29,020 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:B 2024-11-20T19:26:29,020 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:29,026 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:29,026 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/C is initiating minor compaction (all files) 2024-11-20T19:26:29,026 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/C in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:29,026 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/816bd082a742439991ad2b942da80daf, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/22c04aceecd54787b7caa0b96b01636b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/22112d663b4545e1b7d3c4ad068d8cca] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=36.9 K 2024-11-20T19:26:29,026 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 816bd082a742439991ad2b942da80daf, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=362, earliestPutTs=1732130785495 2024-11-20T19:26:29,027 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 22c04aceecd54787b7caa0b96b01636b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1732130786631 2024-11-20T19:26:29,028 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 22112d663b4545e1b7d3c4ad068d8cca, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=406, earliestPutTs=1732130787279 2024-11-20T19:26:29,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 
is added to blk_1073742171_1347 (size=13255) 2024-11-20T19:26:29,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/647abd1cc9bd430f967caf3872c1bb91 is 50, key is test_row_1/A:col10/1732130787920/Put/seqid=0 2024-11-20T19:26:29,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:29,044 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:29,064 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#C#compaction#294 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:29,065 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/23f48bd8a4604ccb84ce2864aebb977f is 50, key is test_row_0/C:col10/1732130787279/Put/seqid=0 2024-11-20T19:26:29,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130849069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:29,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130849070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:29,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130849071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:29,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130849074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:29,078 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130849075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:29,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742172_1348 (size=9857) 2024-11-20T19:26:29,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742173_1349 (size=13255) 2024-11-20T19:26:29,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130849176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:29,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130849176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:29,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130849177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:29,179 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130849178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:29,180 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130849179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:29,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T19:26:29,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130849379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:29,382 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130849380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:29,382 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130849381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:29,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130849381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:29,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130849381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:29,437 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/083a92b137784b3b864184e5039435cc as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/083a92b137784b3b864184e5039435cc 2024-11-20T19:26:29,444 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/A of eff3b3afe40ac2d93c9d770f2a159636 into 083a92b137784b3b864184e5039435cc(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:29,444 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:29,444 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/A, priority=13, startTime=1732130788926; duration=0sec 2024-11-20T19:26:29,444 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:29,444 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:A 2024-11-20T19:26:29,487 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=414 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/647abd1cc9bd430f967caf3872c1bb91 2024-11-20T19:26:29,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/cf7aad5bb9a848cbb892ad7f21f95ad1 is 50, key is test_row_1/B:col10/1732130787920/Put/seqid=0 2024-11-20T19:26:29,522 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/23f48bd8a4604ccb84ce2864aebb977f as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/23f48bd8a4604ccb84ce2864aebb977f 2024-11-20T19:26:29,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742174_1350 (size=9857) 2024-11-20T19:26:29,532 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=414 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/cf7aad5bb9a848cbb892ad7f21f95ad1 2024-11-20T19:26:29,535 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/C of eff3b3afe40ac2d93c9d770f2a159636 into 23f48bd8a4604ccb84ce2864aebb977f(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:29,535 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:29,535 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/C, priority=13, startTime=1732130788926; duration=0sec 2024-11-20T19:26:29,535 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:29,535 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:C 2024-11-20T19:26:29,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/03dc031a2ac3450fa70d4cd2c0ea4f23 is 50, key is test_row_1/C:col10/1732130787920/Put/seqid=0 2024-11-20T19:26:29,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742175_1351 (size=9857) 2024-11-20T19:26:29,570 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=414 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/03dc031a2ac3450fa70d4cd2c0ea4f23 2024-11-20T19:26:29,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/647abd1cc9bd430f967caf3872c1bb91 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/647abd1cc9bd430f967caf3872c1bb91 2024-11-20T19:26:29,583 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/647abd1cc9bd430f967caf3872c1bb91, entries=100, sequenceid=414, filesize=9.6 K 2024-11-20T19:26:29,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/cf7aad5bb9a848cbb892ad7f21f95ad1 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/cf7aad5bb9a848cbb892ad7f21f95ad1 2024-11-20T19:26:29,597 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/cf7aad5bb9a848cbb892ad7f21f95ad1, entries=100, sequenceid=414, filesize=9.6 K 2024-11-20T19:26:29,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/03dc031a2ac3450fa70d4cd2c0ea4f23 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/03dc031a2ac3450fa70d4cd2c0ea4f23 2024-11-20T19:26:29,605 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/03dc031a2ac3450fa70d4cd2c0ea4f23, entries=100, sequenceid=414, filesize=9.6 K 2024-11-20T19:26:29,606 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=174.43 KB/178620 for eff3b3afe40ac2d93c9d770f2a159636 in 625ms, sequenceid=414, compaction requested=false 2024-11-20T19:26:29,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:29,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
2024-11-20T19:26:29,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-11-20T19:26:29,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-11-20T19:26:29,608 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-11-20T19:26:29,608 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4880 sec 2024-11-20T19:26:29,610 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 2.5040 sec 2024-11-20T19:26:29,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:29,683 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-11-20T19:26:29,683 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A 2024-11-20T19:26:29,683 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:29,683 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B 2024-11-20T19:26:29,683 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:29,683 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=C 2024-11-20T19:26:29,683 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:29,687 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/8d7c52e2ef244ac3a5bcba696ef622c8 is 50, key is test_row_0/A:col10/1732130789682/Put/seqid=0 2024-11-20T19:26:29,687 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130849687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:29,688 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130849688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:29,689 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130849689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:29,690 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130849689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:29,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742176_1352 (size=12301) 2024-11-20T19:26:29,691 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130849689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:29,774 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T19:26:29,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130849788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:29,790 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130849788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:29,992 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130849990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:29,993 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130849991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:30,091 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/8d7c52e2ef244ac3a5bcba696ef622c8 2024-11-20T19:26:30,097 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/098f275a03a54a2dacd774722744b9d8 is 50, key is test_row_0/B:col10/1732130789682/Put/seqid=0 2024-11-20T19:26:30,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742177_1353 (size=12301) 2024-11-20T19:26:30,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:30,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130850192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:30,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:30,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130850193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:30,197 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:30,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130850196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:30,294 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:30,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130850293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:30,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:30,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130850295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:30,501 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/098f275a03a54a2dacd774722744b9d8 2024-11-20T19:26:30,507 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/d589efc2012a4527b53d1cd4efd77dc9 is 50, key is test_row_0/C:col10/1732130789682/Put/seqid=0 2024-11-20T19:26:30,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742178_1354 (size=12301) 2024-11-20T19:26:30,797 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:30,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130850796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:30,798 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:30,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130850797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:30,911 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/d589efc2012a4527b53d1cd4efd77dc9 2024-11-20T19:26:30,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/8d7c52e2ef244ac3a5bcba696ef622c8 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/8d7c52e2ef244ac3a5bcba696ef622c8 2024-11-20T19:26:30,918 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/8d7c52e2ef244ac3a5bcba696ef622c8, entries=150, sequenceid=447, filesize=12.0 K 2024-11-20T19:26:30,919 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/098f275a03a54a2dacd774722744b9d8 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/098f275a03a54a2dacd774722744b9d8 2024-11-20T19:26:30,922 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/098f275a03a54a2dacd774722744b9d8, entries=150, sequenceid=447, filesize=12.0 K 2024-11-20T19:26:30,922 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/d589efc2012a4527b53d1cd4efd77dc9 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/d589efc2012a4527b53d1cd4efd77dc9 2024-11-20T19:26:30,926 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/d589efc2012a4527b53d1cd4efd77dc9, entries=150, sequenceid=447, filesize=12.0 K 2024-11-20T19:26:30,926 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=26.84 KB/27480 for eff3b3afe40ac2d93c9d770f2a159636 in 1244ms, sequenceid=447, compaction requested=true 2024-11-20T19:26:30,926 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:30,927 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:30,927 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:30,927 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:30,927 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:30,927 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:30,927 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:30,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:30,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:30,928 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35413 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:30,928 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/A is initiating minor compaction (all files) 2024-11-20T19:26:30,928 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/A in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
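The ~181 KB flush and the repeated "Over memstore limit=512.0 K" rejections above are driven by the region memstore settings: writes are blocked with RegionTooBusyException once a region's memstore exceeds its flush size times the blocking multiplier. A minimal sketch of a configuration that would yield the 512 K limit (the key names are standard HBase keys; the concrete values are assumptions, since the test's actual settings are not visible in this log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {
    // Returns a Configuration whose region memstore settings would produce the
    // 512.0 K blocking limit seen in the RegionTooBusyException entries above.
    public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches 128 KB (example value; the
        // stock default is 128 MB -- the value this test uses is not shown in
        // the log and is assumed here).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Reject writes with RegionTooBusyException once the memstore reaches
        // flush.size * multiplier: 128 KB * 4 = 512 K, matching "Over memstore
        // limit=512.0 K" above. 4 is the stock default multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
    }
}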
2024-11-20T19:26:30,928 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35413 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:30,928 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/B is initiating minor compaction (all files) 2024-11-20T19:26:30,928 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/083a92b137784b3b864184e5039435cc, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/647abd1cc9bd430f967caf3872c1bb91, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/8d7c52e2ef244ac3a5bcba696ef622c8] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=34.6 K 2024-11-20T19:26:30,928 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/B in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:30,928 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/95bf9171e25a4633b81c5856bc315787, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/cf7aad5bb9a848cbb892ad7f21f95ad1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/098f275a03a54a2dacd774722744b9d8] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=34.6 K 2024-11-20T19:26:30,928 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95bf9171e25a4633b81c5856bc315787, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=406, earliestPutTs=1732130787279 2024-11-20T19:26:30,928 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 083a92b137784b3b864184e5039435cc, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=406, earliestPutTs=1732130787279 2024-11-20T19:26:30,928 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf7aad5bb9a848cbb892ad7f21f95ad1, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1732130787918 2024-11-20T19:26:30,928 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 647abd1cc9bd430f967caf3872c1bb91, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1732130787918 2024-11-20T19:26:30,929 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 098f275a03a54a2dacd774722744b9d8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=447, earliestPutTs=1732130789069 2024-11-20T19:26:30,929 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 8d7c52e2ef244ac3a5bcba696ef622c8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=447, earliestPutTs=1732130789069 2024-11-20T19:26:30,935 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#B#compaction#300 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:30,935 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/93df8975d1f74878873416d215ea8ffb is 50, key is test_row_0/B:col10/1732130789682/Put/seqid=0 2024-11-20T19:26:30,938 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#A#compaction#301 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:30,939 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/65d7943aa0184fe081533a678187438f is 50, key is test_row_0/A:col10/1732130789682/Put/seqid=0 2024-11-20T19:26:30,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742179_1355 (size=13357) 2024-11-20T19:26:30,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742180_1356 (size=13357) 2024-11-20T19:26:30,946 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/93df8975d1f74878873416d215ea8ffb as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/93df8975d1f74878873416d215ea8ffb 2024-11-20T19:26:30,947 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/65d7943aa0184fe081533a678187438f as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/65d7943aa0184fe081533a678187438f 2024-11-20T19:26:30,952 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/A of eff3b3afe40ac2d93c9d770f2a159636 into 65d7943aa0184fe081533a678187438f(size=13.0 K), total size for store is 13.0 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:30,952 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/B of eff3b3afe40ac2d93c9d770f2a159636 into 93df8975d1f74878873416d215ea8ffb(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:30,952 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:30,952 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:30,952 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/A, priority=13, startTime=1732130790927; duration=0sec 2024-11-20T19:26:30,952 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/B, priority=13, startTime=1732130790927; duration=0sec 2024-11-20T19:26:30,952 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:30,952 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:30,952 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:A 2024-11-20T19:26:30,952 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:B 2024-11-20T19:26:30,952 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:30,953 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35413 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:30,953 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/C is initiating minor compaction (all files) 2024-11-20T19:26:30,953 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/C in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
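The compaction entries above are selected by the region server itself once a store has three eligible files. As a point of reference, the sketch below uses the standard Admin API to request a compaction from the client side and shows the threshold that the "Selecting compaction from 3 store files" entries correspond to; the explicit call and the value are illustrative, not something this test is shown doing:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompaction {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is
        // selected; 3 matches "Selecting compaction from 3 store files" above.
        conf.setInt("hbase.hstore.compactionThreshold", 3);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Ask for a compaction of every store of the table; the region
            // server still selects the files, as in the entries above.
            admin.compact(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}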
2024-11-20T19:26:30,953 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/23f48bd8a4604ccb84ce2864aebb977f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/03dc031a2ac3450fa70d4cd2c0ea4f23, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/d589efc2012a4527b53d1cd4efd77dc9] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=34.6 K 2024-11-20T19:26:30,953 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 23f48bd8a4604ccb84ce2864aebb977f, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=406, earliestPutTs=1732130787279 2024-11-20T19:26:30,954 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 03dc031a2ac3450fa70d4cd2c0ea4f23, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1732130787918 2024-11-20T19:26:30,954 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting d589efc2012a4527b53d1cd4efd77dc9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=447, earliestPutTs=1732130789069 2024-11-20T19:26:30,959 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#C#compaction#302 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:30,960 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/b8f7a4f92cf145b1b8474ebe4a8fbd39 is 50, key is test_row_0/C:col10/1732130789682/Put/seqid=0 2024-11-20T19:26:30,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742181_1357 (size=13357) 2024-11-20T19:26:31,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:31,206 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:26:31,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A 2024-11-20T19:26:31,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:31,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B 2024-11-20T19:26:31,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:31,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=C 2024-11-20T19:26:31,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:31,210 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/cd44324aff0047eea7cd4a5011a163c4 is 50, key is test_row_0/A:col10/1732130791204/Put/seqid=0 2024-11-20T19:26:31,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742182_1358 (size=14741) 2024-11-20T19:26:31,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T19:26:31,222 INFO [Thread-1206 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-11-20T19:26:31,223 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:31,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-11-20T19:26:31,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T19:26:31,224 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 
execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:31,224 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:31,224 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:31,232 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:31,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130851229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:31,232 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:31,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130851231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:31,232 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:31,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130851231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:31,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T19:26:31,334 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:31,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130851333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:31,335 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:31,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130851333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:31,335 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:31,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130851333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:31,369 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/b8f7a4f92cf145b1b8474ebe4a8fbd39 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/b8f7a4f92cf145b1b8474ebe4a8fbd39 2024-11-20T19:26:31,373 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/C of eff3b3afe40ac2d93c9d770f2a159636 into b8f7a4f92cf145b1b8474ebe4a8fbd39(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:31,373 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:31,373 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/C, priority=13, startTime=1732130790928; duration=0sec 2024-11-20T19:26:31,373 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:31,373 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:C 2024-11-20T19:26:31,375 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:31,376 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T19:26:31,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:31,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:31,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:31,376 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:31,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:31,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:31,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T19:26:31,528 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:31,528 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T19:26:31,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:31,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:31,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:31,528 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:31,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:31,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:31,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:31,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130851535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:31,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:31,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130851536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:31,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:31,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130851536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:31,613 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=461 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/cd44324aff0047eea7cd4a5011a163c4 2024-11-20T19:26:31,620 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/b3aab0c6cadd4a4687361d4306c9037e is 50, key is test_row_0/B:col10/1732130791204/Put/seqid=0 2024-11-20T19:26:31,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742183_1359 (size=12301) 2024-11-20T19:26:31,624 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=461 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/b3aab0c6cadd4a4687361d4306c9037e 2024-11-20T19:26:31,631 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/eb3afcb1f3c5428d8e2d76530b255c5b is 50, key is test_row_0/C:col10/1732130791204/Put/seqid=0 2024-11-20T19:26:31,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742184_1360 (size=12301) 2024-11-20T19:26:31,680 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:31,680 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T19:26:31,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
2024-11-20T19:26:31,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:31,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:31,681 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:31,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:31,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:31,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:31,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130851803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:31,808 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:31,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130851807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:31,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T19:26:31,832 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:31,832 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T19:26:31,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:31,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:31,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:31,833 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:31,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:31,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:31,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:31,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130851838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:31,839 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:31,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130851839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:31,841 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:31,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130851840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:31,984 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:31,985 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T19:26:31,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
2024-11-20T19:26:31,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:31,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:31,985 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:31,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:31,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:32,035 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=461 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/eb3afcb1f3c5428d8e2d76530b255c5b 2024-11-20T19:26:32,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/cd44324aff0047eea7cd4a5011a163c4 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/cd44324aff0047eea7cd4a5011a163c4 2024-11-20T19:26:32,042 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/cd44324aff0047eea7cd4a5011a163c4, entries=200, sequenceid=461, filesize=14.4 K 2024-11-20T19:26:32,042 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/b3aab0c6cadd4a4687361d4306c9037e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/b3aab0c6cadd4a4687361d4306c9037e 2024-11-20T19:26:32,046 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/b3aab0c6cadd4a4687361d4306c9037e, entries=150, sequenceid=461, filesize=12.0 K 2024-11-20T19:26:32,046 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/eb3afcb1f3c5428d8e2d76530b255c5b as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/eb3afcb1f3c5428d8e2d76530b255c5b 2024-11-20T19:26:32,050 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/eb3afcb1f3c5428d8e2d76530b255c5b, entries=150, sequenceid=461, filesize=12.0 K 2024-11-20T19:26:32,050 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for eff3b3afe40ac2d93c9d770f2a159636 in 844ms, sequenceid=461, compaction requested=false 2024-11-20T19:26:32,050 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:32,137 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:32,137 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=86 2024-11-20T19:26:32,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:32,137 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T19:26:32,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A 2024-11-20T19:26:32,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:32,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B 2024-11-20T19:26:32,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:32,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=C 2024-11-20T19:26:32,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:32,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/b898411eda454b3a94b3286a5bc30fef is 50, key is test_row_0/A:col10/1732130791228/Put/seqid=0 2024-11-20T19:26:32,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742185_1361 (size=12301) 2024-11-20T19:26:32,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T19:26:32,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:32,342 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. as already flushing 2024-11-20T19:26:32,353 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:32,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130852350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:32,353 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:32,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130852351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:32,353 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:32,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130852351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:32,454 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:32,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130852454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:32,454 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:32,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130852454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:32,456 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:32,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130852454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:32,572 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=486 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/b898411eda454b3a94b3286a5bc30fef 2024-11-20T19:26:32,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/84cd0f1b4aa8496cb6b60a67562a08fb is 50, key is test_row_0/B:col10/1732130791228/Put/seqid=0 2024-11-20T19:26:32,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742186_1362 (size=12301) 2024-11-20T19:26:32,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:32,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130852655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:32,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:32,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130852656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:32,658 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:32,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130852657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:32,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:32,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130852958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:32,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:32,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130852958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:32,962 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:32,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130852960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:32,982 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=486 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/84cd0f1b4aa8496cb6b60a67562a08fb 2024-11-20T19:26:32,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/84915af15c784a5490307b16df6359cf is 50, key is test_row_0/C:col10/1732130791228/Put/seqid=0 2024-11-20T19:26:32,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742187_1363 (size=12301) 2024-11-20T19:26:33,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T19:26:33,392 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=486 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/84915af15c784a5490307b16df6359cf 2024-11-20T19:26:33,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/b898411eda454b3a94b3286a5bc30fef as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/b898411eda454b3a94b3286a5bc30fef 2024-11-20T19:26:33,399 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/b898411eda454b3a94b3286a5bc30fef, entries=150, sequenceid=486, filesize=12.0 K 2024-11-20T19:26:33,400 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/84cd0f1b4aa8496cb6b60a67562a08fb as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/84cd0f1b4aa8496cb6b60a67562a08fb 2024-11-20T19:26:33,403 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/84cd0f1b4aa8496cb6b60a67562a08fb, entries=150, sequenceid=486, filesize=12.0 K 2024-11-20T19:26:33,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/84915af15c784a5490307b16df6359cf as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/84915af15c784a5490307b16df6359cf 2024-11-20T19:26:33,406 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/84915af15c784a5490307b16df6359cf, entries=150, sequenceid=486, filesize=12.0 K 2024-11-20T19:26:33,407 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for eff3b3afe40ac2d93c9d770f2a159636 in 1270ms, sequenceid=486, compaction requested=true 2024-11-20T19:26:33,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:33,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
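The entries above show flush pid=86 finishing on eff3b3afe40ac2d93c9d770f2a159636 while concurrent writers are rejected with RegionTooBusyException ("Over memstore limit=512.0 K") until the flush frees memstore space; the matching client-side lines (RpcRetryingCallerImpl, "tries=6, retries=16") show the same Put being retried transparently. As an illustrative sketch only, not part of this test output: a standalone client issuing the same kind of single-row Put would rely on those built-in retries. The retry keys below are standard HBase client settings, and the table, row, family and value literals merely mirror names that appear in this log.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithRetries {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Standard client retry knobs (values here are illustrative, not taken from this run).
        conf.setInt("hbase.client.retries.number", 16);   // attempts before giving up
        conf.setLong("hbase.client.pause", 100L);         // base backoff between attempts, ms

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // Blocks until the write succeeds or retries are exhausted; a RegionTooBusyException
          // from an over-limit memstore is retried internally between attempts.
          table.put(put);
        }
      }
    }

If the region stays blocked past the configured retries, Table.put surfaces the last RegionTooBusyException wrapped in an IOException (a RetriesExhaustedException carrying the "details=row ... see https://s.apache.org/timeout" text seen later in this log).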
2024-11-20T19:26:33,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-20T19:26:33,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-11-20T19:26:33,409 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-11-20T19:26:33,409 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1840 sec 2024-11-20T19:26:33,410 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 2.1860 sec 2024-11-20T19:26:33,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:33,463 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T19:26:33,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A 2024-11-20T19:26:33,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:33,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B 2024-11-20T19:26:33,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:33,464 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=C 2024-11-20T19:26:33,464 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:33,467 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/b54c60b4ef7343089bca1ea9fcace691 is 50, key is test_row_0/A:col10/1732130792347/Put/seqid=0 2024-11-20T19:26:33,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742188_1364 (size=14741) 2024-11-20T19:26:33,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:33,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130853515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:33,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:33,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130853515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:33,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:33,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130853516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:33,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:33,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130853618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:33,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:33,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130853619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:33,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:33,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130853619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:33,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:33,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58066 deadline: 1732130853820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:33,821 DEBUG [Thread-1198 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4134 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., hostname=db9c3a6c6492,41229,1732130701496, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:26:33,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:33,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58018 deadline: 1732130853820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:33,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:33,825 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:33,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130853821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:33,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130853821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:33,825 DEBUG [Thread-1200 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4138 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., hostname=db9c3a6c6492,41229,1732130701496, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at 
org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:26:33,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:33,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130853822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:33,875 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=499 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/b54c60b4ef7343089bca1ea9fcace691 2024-11-20T19:26:33,880 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/8dfd934c4bab41adb513a1c2181127eb is 50, key is test_row_0/B:col10/1732130792347/Put/seqid=0 2024-11-20T19:26:33,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742189_1365 (size=12301) 2024-11-20T19:26:34,127 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:34,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130854126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:34,127 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:34,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130854126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:34,128 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:34,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130854126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:34,292 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=499 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/8dfd934c4bab41adb513a1c2181127eb 2024-11-20T19:26:34,297 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/71c6c74f26ac48b098aaefff74b09b51 is 50, key is test_row_0/C:col10/1732130792347/Put/seqid=0 2024-11-20T19:26:34,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742190_1366 (size=12301) 2024-11-20T19:26:34,471 DEBUG [Thread-1213 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5e998dd3 to 127.0.0.1:49985 2024-11-20T19:26:34,472 DEBUG [Thread-1213 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:34,472 DEBUG [Thread-1215 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e4c79b8 to 127.0.0.1:49985 2024-11-20T19:26:34,472 DEBUG [Thread-1215 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:34,473 DEBUG [Thread-1211 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x088aa519 to 127.0.0.1:49985 2024-11-20T19:26:34,473 DEBUG [Thread-1211 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:34,473 DEBUG [Thread-1207 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17cf7fc0 to 127.0.0.1:49985 2024-11-20T19:26:34,473 DEBUG [Thread-1207 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
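The repeated RegionTooBusyException rejections above are back-pressure: each Mutate is refused because the region's memstore has hit its 512.0 K blocking limit, and the client's RpcRetryingCallerImpl backs off and retries (tries=6 of retries=16 in the trace) until a flush frees space. The sketch below shows what an explicit retry loop around Table.put would look like; it is only an illustration, since the stock HBase client already retries this internally. The table, row, and column family names come from the log; the class name, payload, loop bound, and backoff constants are assumptions.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_2"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                                  // assumed starting backoff
            for (int attempt = 1; attempt <= 8; attempt++) {       // assumed attempt cap
                try {
                    table.put(put);                                // the call rejected in the log
                    break;                                         // write accepted
                } catch (IOException e) {
                    // Depending on client retry settings the busy signal may surface
                    // directly or wrapped; treat either as "back off and try again".
                    boolean busy = e instanceof RegionTooBusyException
                            || e.getCause() instanceof RegionTooBusyException;
                    if (!busy) {
                        throw e;                                   // not a back-pressure signal
                    }
                    Thread.sleep(backoffMs);                       // give the flush time to free memstore space
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                }
            }
        }
    }
}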
2024-11-20T19:26:34,474 DEBUG [Thread-1209 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x78b04266 to 127.0.0.1:49985 2024-11-20T19:26:34,475 DEBUG [Thread-1209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:34,630 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:34,630 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:34,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58026 deadline: 1732130854630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:34,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58042 deadline: 1732130854630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:34,632 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:34,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58006 deadline: 1732130854632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:34,703 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=499 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/71c6c74f26ac48b098aaefff74b09b51 2024-11-20T19:26:34,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/b54c60b4ef7343089bca1ea9fcace691 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/b54c60b4ef7343089bca1ea9fcace691 2024-11-20T19:26:34,717 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/b54c60b4ef7343089bca1ea9fcace691, entries=200, sequenceid=499, filesize=14.4 K 2024-11-20T19:26:34,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/8dfd934c4bab41adb513a1c2181127eb as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/8dfd934c4bab41adb513a1c2181127eb 2024-11-20T19:26:34,720 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/8dfd934c4bab41adb513a1c2181127eb, entries=150, sequenceid=499, filesize=12.0 K 2024-11-20T19:26:34,721 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/71c6c74f26ac48b098aaefff74b09b51 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/71c6c74f26ac48b098aaefff74b09b51 2024-11-20T19:26:34,724 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/71c6c74f26ac48b098aaefff74b09b51, entries=150, sequenceid=499, filesize=12.0 K 2024-11-20T19:26:34,725 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for eff3b3afe40ac2d93c9d770f2a159636 in 1262ms, sequenceid=499, compaction requested=true 2024-11-20T19:26:34,725 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:34,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:34,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:34,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:34,725 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:34,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:34,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eff3b3afe40ac2d93c9d770f2a159636:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:34,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:34,725 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:34,726 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55140 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:34,726 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50260 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:34,726 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/A is initiating minor compaction (all files) 2024-11-20T19:26:34,726 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/B is initiating minor compaction (all files) 2024-11-20T19:26:34,726 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/A in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 
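Both compaction threads above settle on 4 store files after the exploring policy reports "3 permutations with 3 in ratio". In simplified form, that ratio test accepts a candidate set only if no single file is larger than a configured ratio times the combined size of the other files in the set. The sketch below illustrates just that check; it is not HBase's actual ExploringCompactionPolicy, and the file sizes and the 1.2 ratio are assumptions roughly matching the 55140-byte A-store selection.

import java.util.List;

// Simplified size-ratio check behind the "in ratio" wording in the log above;
// an illustration only, not the real ExploringCompactionPolicy implementation.
public class RatioCheckSketch {
    static boolean filesInRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > (total - size) * ratio) {
                return false; // one file dwarfs the rest; rewriting it now buys little
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Four file sizes (assumed) in the same ballpark as the ~53.8 K A-store selection.
        System.out.println(filesInRatio(List.of(13300L, 14700L, 12300L, 14800L), 1.2));
    }
}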
2024-11-20T19:26:34,726 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/B in TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:34,726 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/65d7943aa0184fe081533a678187438f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/cd44324aff0047eea7cd4a5011a163c4, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/b898411eda454b3a94b3286a5bc30fef, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/b54c60b4ef7343089bca1ea9fcace691] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=53.8 K 2024-11-20T19:26:34,726 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/93df8975d1f74878873416d215ea8ffb, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/b3aab0c6cadd4a4687361d4306c9037e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/84cd0f1b4aa8496cb6b60a67562a08fb, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/8dfd934c4bab41adb513a1c2181127eb] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=49.1 K 2024-11-20T19:26:34,726 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 65d7943aa0184fe081533a678187438f, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=447, earliestPutTs=1732130789069 2024-11-20T19:26:34,726 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 93df8975d1f74878873416d215ea8ffb, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=447, earliestPutTs=1732130789069 2024-11-20T19:26:34,727 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd44324aff0047eea7cd4a5011a163c4, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=461, earliestPutTs=1732130791203 2024-11-20T19:26:34,727 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting b3aab0c6cadd4a4687361d4306c9037e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=461, earliestPutTs=1732130791204 2024-11-20T19:26:34,727 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting b898411eda454b3a94b3286a5bc30fef, keycount=150, 
bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=486, earliestPutTs=1732130791223 2024-11-20T19:26:34,727 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 84cd0f1b4aa8496cb6b60a67562a08fb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=486, earliestPutTs=1732130791223 2024-11-20T19:26:34,727 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting b54c60b4ef7343089bca1ea9fcace691, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1732130792347 2024-11-20T19:26:34,727 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 8dfd934c4bab41adb513a1c2181127eb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1732130792347 2024-11-20T19:26:34,733 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#B#compaction#313 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:34,733 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#A#compaction#312 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:34,734 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/17449dea4edc4bf78be7d6a2562d87bf is 50, key is test_row_0/B:col10/1732130792347/Put/seqid=0 2024-11-20T19:26:34,734 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/12906b135a4848fca7812cf3662db9ea is 50, key is test_row_0/A:col10/1732130792347/Put/seqid=0 2024-11-20T19:26:34,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742192_1368 (size=13493) 2024-11-20T19:26:34,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742191_1367 (size=13493) 2024-11-20T19:26:35,150 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/12906b135a4848fca7812cf3662db9ea as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/12906b135a4848fca7812cf3662db9ea 2024-11-20T19:26:35,150 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/17449dea4edc4bf78be7d6a2562d87bf as 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/17449dea4edc4bf78be7d6a2562d87bf 2024-11-20T19:26:35,153 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/A of eff3b3afe40ac2d93c9d770f2a159636 into 12906b135a4848fca7812cf3662db9ea(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:35,153 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/B of eff3b3afe40ac2d93c9d770f2a159636 into 17449dea4edc4bf78be7d6a2562d87bf(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:35,153 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:35,153 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:35,153 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/A, priority=12, startTime=1732130794725; duration=0sec 2024-11-20T19:26:35,153 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/B, priority=12, startTime=1732130794725; duration=0sec 2024-11-20T19:26:35,153 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:35,153 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:A 2024-11-20T19:26:35,153 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:35,153 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:B 2024-11-20T19:26:35,153 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:35,154 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50260 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:35,154 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): eff3b3afe40ac2d93c9d770f2a159636/C is initiating minor compaction (all files) 2024-11-20T19:26:35,154 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eff3b3afe40ac2d93c9d770f2a159636/C in 
TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:35,155 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/b8f7a4f92cf145b1b8474ebe4a8fbd39, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/eb3afcb1f3c5428d8e2d76530b255c5b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/84915af15c784a5490307b16df6359cf, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/71c6c74f26ac48b098aaefff74b09b51] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp, totalSize=49.1 K 2024-11-20T19:26:35,155 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting b8f7a4f92cf145b1b8474ebe4a8fbd39, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=447, earliestPutTs=1732130789069 2024-11-20T19:26:35,155 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb3afcb1f3c5428d8e2d76530b255c5b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=461, earliestPutTs=1732130791204 2024-11-20T19:26:35,155 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84915af15c784a5490307b16df6359cf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=486, earliestPutTs=1732130791223 2024-11-20T19:26:35,156 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71c6c74f26ac48b098aaefff74b09b51, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1732130792347 2024-11-20T19:26:35,162 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eff3b3afe40ac2d93c9d770f2a159636#C#compaction#314 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:35,162 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/28c9a96d4997451ca9903eba53d5b6f9 is 50, key is test_row_0/C:col10/1732130792347/Put/seqid=0 2024-11-20T19:26:35,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742193_1369 (size=13493) 2024-11-20T19:26:35,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T19:26:35,330 INFO [Thread-1206 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-11-20T19:26:35,577 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/28c9a96d4997451ca9903eba53d5b6f9 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/28c9a96d4997451ca9903eba53d5b6f9 2024-11-20T19:26:35,583 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in eff3b3afe40ac2d93c9d770f2a159636/C of eff3b3afe40ac2d93c9d770f2a159636 into 28c9a96d4997451ca9903eba53d5b6f9(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
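The "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed" entry above is the client side of the flush whose output files were just compacted. A flush like this is requested through the Admin API, roughly as sketched below; this is a minimal sketch assuming a reachable cluster with default client configuration, and the procedure id is assigned by the master, not by the caller.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            admin.flush(table);   // asks the cluster to flush the table's memstores to store files
            admin.compact(table); // optional; the server may also request a compaction on its own,
                                  // as "compaction requested=true" shows in the log above
        }
    }
}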
2024-11-20T19:26:35,583 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:35,583 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636., storeName=eff3b3afe40ac2d93c9d770f2a159636/C, priority=12, startTime=1732130794725; duration=0sec 2024-11-20T19:26:35,583 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:35,583 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eff3b3afe40ac2d93c9d770f2a159636:C 2024-11-20T19:26:35,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:35,637 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:26:35,637 DEBUG [Thread-1204 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x07e55eb7 to 127.0.0.1:49985 2024-11-20T19:26:35,637 DEBUG [Thread-1196 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x58341641 to 127.0.0.1:49985 2024-11-20T19:26:35,637 DEBUG [Thread-1204 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:35,637 DEBUG [Thread-1196 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:35,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A 2024-11-20T19:26:35,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:35,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B 2024-11-20T19:26:35,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:35,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=C 2024-11-20T19:26:35,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:35,640 DEBUG [Thread-1202 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x683b64c3 to 127.0.0.1:49985 2024-11-20T19:26:35,640 DEBUG [Thread-1202 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:35,643 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/5b439f6c0cd547848054b85b1932b679 is 50, key is test_row_0/A:col10/1732130795635/Put/seqid=0 2024-11-20T19:26:35,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742194_1370 (size=12301) 2024-11-20T19:26:36,049 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=527 (bloomFilter=true), 
to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/5b439f6c0cd547848054b85b1932b679 2024-11-20T19:26:36,059 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/a4888101b5934569a6e43dc48825d11a is 50, key is test_row_0/B:col10/1732130795635/Put/seqid=0 2024-11-20T19:26:36,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742195_1371 (size=12301) 2024-11-20T19:26:36,463 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=527 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/a4888101b5934569a6e43dc48825d11a 2024-11-20T19:26:36,469 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/8a225c643eab491797b1c74dbe7fa969 is 50, key is test_row_0/C:col10/1732130795635/Put/seqid=0 2024-11-20T19:26:36,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742196_1372 (size=12301) 2024-11-20T19:26:36,874 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=527 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/8a225c643eab491797b1c74dbe7fa969 2024-11-20T19:26:36,882 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/5b439f6c0cd547848054b85b1932b679 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/5b439f6c0cd547848054b85b1932b679 2024-11-20T19:26:36,888 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/5b439f6c0cd547848054b85b1932b679, entries=150, sequenceid=527, filesize=12.0 K 2024-11-20T19:26:36,889 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/a4888101b5934569a6e43dc48825d11a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/a4888101b5934569a6e43dc48825d11a 2024-11-20T19:26:36,892 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/a4888101b5934569a6e43dc48825d11a, entries=150, 
sequenceid=527, filesize=12.0 K
2024-11-20T19:26:36,893 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/8a225c643eab491797b1c74dbe7fa969 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/8a225c643eab491797b1c74dbe7fa969
2024-11-20T19:26:36,897 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/8a225c643eab491797b1c74dbe7fa969, entries=150, sequenceid=527, filesize=12.0 K
2024-11-20T19:26:36,897 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=6.71 KB/6870 for eff3b3afe40ac2d93c9d770f2a159636 in 1261ms, sequenceid=527, compaction requested=false
2024-11-20T19:26:36,898 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eff3b3afe40ac2d93c9d770f2a159636:
2024-11-20T19:26:37,859 DEBUG [Thread-1200 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x64ee0130 to 127.0.0.1:49985
2024-11-20T19:26:37,859 DEBUG [Thread-1200 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T19:26:37,861 DEBUG [Thread-1198 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x44645c55 to 127.0.0.1:49985
2024-11-20T19:26:37,861 DEBUG [Thread-1198 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T19:26:37,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers:
2024-11-20T19:26:37,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 97
2024-11-20T19:26:37,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 101
2024-11-20T19:26:37,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 67
2024-11-20T19:26:37,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 76
2024-11-20T19:26:37,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 77
2024-11-20T19:26:37,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-20T19:26:37,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4706
2024-11-20T19:26:37,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4649
2024-11-20T19:26:37,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4578
2024-11-20T19:26:37,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4684
2024-11-20T19:26:37,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4670
2024-11-20T19:26:37,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-20T19:26:37,861 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-20T19:26:37,862 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3b70f48f to 127.0.0.1:49985
2024-11-20T19:26:37,862 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T19:26:37,862 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-20T19:26:37,862 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-11-20T19:26:37,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-20T19:26:37,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87
2024-11-20T19:26:37,865 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130797864"}]},"ts":"1732130797864"}
2024-11-20T19:26:37,865 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-11-20T19:26:37,913 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-11-20T19:26:37,914 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-11-20T19:26:37,915 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=eff3b3afe40ac2d93c9d770f2a159636, UNASSIGN}]
2024-11-20T19:26:37,916 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=eff3b3afe40ac2d93c9d770f2a159636, UNASSIGN
2024-11-20T19:26:37,916 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=89 updating hbase:meta row=eff3b3afe40ac2d93c9d770f2a159636, regionState=CLOSING, regionLocation=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:37,917 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-11-20T19:26:37,917 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; CloseRegionProcedure eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496}]
2024-11-20T19:26:37,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87
2024-11-20T19:26:38,068 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:38,069 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] handler.UnassignRegionHandler(124): Close eff3b3afe40ac2d93c9d770f2a159636
2024-11-20T19:26:38,069 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-11-20T19:26:38,069 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1681): Closing eff3b3afe40ac2d93c9d770f2a159636, disabling compactions & flushes
2024-11-20T19:26:38,069 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.
2024-11-20T19:26:38,069 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.
2024-11-20T19:26:38,069 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. after waiting 0 ms
2024-11-20T19:26:38,069 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.
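Note: the disable/unassign/close sequence above (pid=87 through pid=90) is the master-side reaction to a single client call. A minimal client-side sketch of that call, using the standard HBase Admin API, is shown here; the configuration lookup and the class name are illustrative assumptions and are not taken from this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical driver class; only the Admin calls correspond to the log above.
public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (!admin.isTableDisabled(table)) {
        // Triggers the master's DisableTableProcedure (pid=87 in this log)
        // and blocks until the table reaches the DISABLED state.
        admin.disableTable(table);
      }
    }
  }
}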
2024-11-20T19:26:38,069 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(2837): Flushing eff3b3afe40ac2d93c9d770f2a159636 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB
2024-11-20T19:26:38,069 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=A
2024-11-20T19:26:38,069 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:26:38,069 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=B
2024-11-20T19:26:38,069 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:26:38,069 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eff3b3afe40ac2d93c9d770f2a159636, store=C
2024-11-20T19:26:38,070 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:26:38,072 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/d109384f24af4f6f9b29444e68f7585e is 50, key is test_row_0/A:col10/1732130797860/Put/seqid=0
2024-11-20T19:26:38,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742197_1373 (size=9857)
2024-11-20T19:26:38,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87
2024-11-20T19:26:38,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87
2024-11-20T19:26:38,476 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=533 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/d109384f24af4f6f9b29444e68f7585e
2024-11-20T19:26:38,496 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/952f1e04f78449feb68fe66fec0d3b47 is 50, key is test_row_0/B:col10/1732130797860/Put/seqid=0
2024-11-20T19:26:38,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742198_1374 (size=9857)
2024-11-20T19:26:38,900 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=533 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/952f1e04f78449feb68fe66fec0d3b47
2024-11-20T19:26:38,906 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/9502dd2ba584490ca95a88e467ccd367 is 50, key is test_row_0/C:col10/1732130797860/Put/seqid=0
2024-11-20T19:26:38,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742199_1375 (size=9857)
2024-11-20T19:26:38,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87
2024-11-20T19:26:39,344 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=533 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/9502dd2ba584490ca95a88e467ccd367
2024-11-20T19:26:39,347 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/A/d109384f24af4f6f9b29444e68f7585e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/d109384f24af4f6f9b29444e68f7585e
2024-11-20T19:26:39,350 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/d109384f24af4f6f9b29444e68f7585e, entries=100, sequenceid=533, filesize=9.6 K
2024-11-20T19:26:39,351 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/B/952f1e04f78449feb68fe66fec0d3b47 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/952f1e04f78449feb68fe66fec0d3b47
2024-11-20T19:26:39,355 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/952f1e04f78449feb68fe66fec0d3b47, entries=100, sequenceid=533, filesize=9.6 K
2024-11-20T19:26:39,356 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegionFileSystem(442): Committing
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/.tmp/C/9502dd2ba584490ca95a88e467ccd367 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/9502dd2ba584490ca95a88e467ccd367 2024-11-20T19:26:39,359 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/9502dd2ba584490ca95a88e467ccd367, entries=100, sequenceid=533, filesize=9.6 K 2024-11-20T19:26:39,360 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for eff3b3afe40ac2d93c9d770f2a159636 in 1291ms, sequenceid=533, compaction requested=true 2024-11-20T19:26:39,361 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/c9a7831a5dbf4eba8b6daa6f037f366d, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/e4e416d99de944a58026ddbd591abeee, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/10c4186ed0ba43caadd8b49975eebe4c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/bfc28a4541e0402a942aa6c0c7ce109b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/1df33656f25e44968a7b46e3bda51779, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/caf503dfc01244ee9cc1fd0c9c6a3645, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/a81962eaa9ec493b8c92cae70406201e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/d8de5e3986bc429290b82bbc3e447161, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/5f19800f06304587b251e5885d04d75e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/0fc2bc4260ad4e4b99a4037c59c58aa6, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/29019d27034240768e022597ec82b687, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/111f8a5838da47cd9dfc4deade89b18f, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/52c173376ed44c41a1184d8526f7dc15, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/d9ccfceb2fe54be38aeefeab0db2db37, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/7685aee2a1344c5f94dd30fc6315a9f9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/56e2ab45e8ea4e45877e5dbc64097782, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/baced581151240849f3221eb90a6616c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/97b5e86c201541218c77f7367267354b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/29e866384edb4be7b037100adec04d9f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/c4a0c5301759476aa6e356d0c145172d, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/6f0b7aca8bae444e97143e4784e7ccf9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/7ef4084b821c42ea9a3bd1955fdbb1d2, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/88fbd025a3804fb880098f23e3fc5401, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/597459d456554254b72f60a92d8ae5c9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/34557173f3544d3ea9a00317608c3464, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/123e81bcc66249f2bf82394d0413b22b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/485ee5ef95a249baa6a82ed66571672c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/083a92b137784b3b864184e5039435cc, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/f0d161deffdf447894d8b687c46810e7, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/647abd1cc9bd430f967caf3872c1bb91, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/65d7943aa0184fe081533a678187438f, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/8d7c52e2ef244ac3a5bcba696ef622c8, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/cd44324aff0047eea7cd4a5011a163c4, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/b898411eda454b3a94b3286a5bc30fef, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/b54c60b4ef7343089bca1ea9fcace691] to archive 2024-11-20T19:26:39,361 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T19:26:39,363 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/c9a7831a5dbf4eba8b6daa6f037f366d to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/c9a7831a5dbf4eba8b6daa6f037f366d 2024-11-20T19:26:39,364 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/e4e416d99de944a58026ddbd591abeee to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/e4e416d99de944a58026ddbd591abeee 2024-11-20T19:26:39,364 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/10c4186ed0ba43caadd8b49975eebe4c to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/10c4186ed0ba43caadd8b49975eebe4c 2024-11-20T19:26:39,365 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/bfc28a4541e0402a942aa6c0c7ce109b to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/bfc28a4541e0402a942aa6c0c7ce109b 2024-11-20T19:26:39,366 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/1df33656f25e44968a7b46e3bda51779 to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/1df33656f25e44968a7b46e3bda51779 2024-11-20T19:26:39,367 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/caf503dfc01244ee9cc1fd0c9c6a3645 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/caf503dfc01244ee9cc1fd0c9c6a3645 2024-11-20T19:26:39,368 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/a81962eaa9ec493b8c92cae70406201e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/a81962eaa9ec493b8c92cae70406201e 2024-11-20T19:26:39,368 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/d8de5e3986bc429290b82bbc3e447161 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/d8de5e3986bc429290b82bbc3e447161 2024-11-20T19:26:39,369 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/5f19800f06304587b251e5885d04d75e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/5f19800f06304587b251e5885d04d75e 2024-11-20T19:26:39,371 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/0fc2bc4260ad4e4b99a4037c59c58aa6 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/0fc2bc4260ad4e4b99a4037c59c58aa6 2024-11-20T19:26:39,371 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/29019d27034240768e022597ec82b687 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/29019d27034240768e022597ec82b687 2024-11-20T19:26:39,372 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/111f8a5838da47cd9dfc4deade89b18f to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/111f8a5838da47cd9dfc4deade89b18f 2024-11-20T19:26:39,373 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/52c173376ed44c41a1184d8526f7dc15 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/52c173376ed44c41a1184d8526f7dc15 2024-11-20T19:26:39,373 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/d9ccfceb2fe54be38aeefeab0db2db37 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/d9ccfceb2fe54be38aeefeab0db2db37 2024-11-20T19:26:39,374 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/7685aee2a1344c5f94dd30fc6315a9f9 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/7685aee2a1344c5f94dd30fc6315a9f9 2024-11-20T19:26:39,375 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/56e2ab45e8ea4e45877e5dbc64097782 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/56e2ab45e8ea4e45877e5dbc64097782 2024-11-20T19:26:39,376 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/baced581151240849f3221eb90a6616c to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/baced581151240849f3221eb90a6616c 2024-11-20T19:26:39,377 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/97b5e86c201541218c77f7367267354b to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/97b5e86c201541218c77f7367267354b 2024-11-20T19:26:39,377 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/29e866384edb4be7b037100adec04d9f to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/29e866384edb4be7b037100adec04d9f 2024-11-20T19:26:39,378 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/c4a0c5301759476aa6e356d0c145172d to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/c4a0c5301759476aa6e356d0c145172d 2024-11-20T19:26:39,379 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/6f0b7aca8bae444e97143e4784e7ccf9 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/6f0b7aca8bae444e97143e4784e7ccf9 2024-11-20T19:26:39,380 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/7ef4084b821c42ea9a3bd1955fdbb1d2 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/7ef4084b821c42ea9a3bd1955fdbb1d2 2024-11-20T19:26:39,380 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/88fbd025a3804fb880098f23e3fc5401 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/88fbd025a3804fb880098f23e3fc5401 2024-11-20T19:26:39,381 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/597459d456554254b72f60a92d8ae5c9 to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/597459d456554254b72f60a92d8ae5c9 2024-11-20T19:26:39,382 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/34557173f3544d3ea9a00317608c3464 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/34557173f3544d3ea9a00317608c3464 2024-11-20T19:26:39,383 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/123e81bcc66249f2bf82394d0413b22b to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/123e81bcc66249f2bf82394d0413b22b 2024-11-20T19:26:39,383 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/485ee5ef95a249baa6a82ed66571672c to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/485ee5ef95a249baa6a82ed66571672c 2024-11-20T19:26:39,384 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/083a92b137784b3b864184e5039435cc to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/083a92b137784b3b864184e5039435cc 2024-11-20T19:26:39,385 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/f0d161deffdf447894d8b687c46810e7 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/f0d161deffdf447894d8b687c46810e7 2024-11-20T19:26:39,386 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/647abd1cc9bd430f967caf3872c1bb91 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/647abd1cc9bd430f967caf3872c1bb91 2024-11-20T19:26:39,386 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/65d7943aa0184fe081533a678187438f to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/65d7943aa0184fe081533a678187438f 2024-11-20T19:26:39,387 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/8d7c52e2ef244ac3a5bcba696ef622c8 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/8d7c52e2ef244ac3a5bcba696ef622c8 2024-11-20T19:26:39,387 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/cd44324aff0047eea7cd4a5011a163c4 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/cd44324aff0047eea7cd4a5011a163c4 2024-11-20T19:26:39,389 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/b898411eda454b3a94b3286a5bc30fef to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/b898411eda454b3a94b3286a5bc30fef 2024-11-20T19:26:39,390 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/b54c60b4ef7343089bca1ea9fcace691 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/b54c60b4ef7343089bca1ea9fcace691 2024-11-20T19:26:39,391 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/1df6287842de4424b09fa696f0585378, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/c45b69bedd754053a26d5cd4bcec490e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/8d85a89313aa4c5fa5833ff59cb8d59c, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/d57f19803e32443da90ad9c0fe911e7e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/fcc4143c6ff34710ab09ff7f9d8dfeff, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/9062e57557ee455da326d6306667bcb1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/35200128acc5479d888a78e90595bc81, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/9645a22b840e4e52bf33a612e39f2594, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/d08f51c1cf2d41ebaacc6fccabc64a19, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/3ec827e7b5d34c66985754995de9744c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/104771bdc12844e388f6de6452e85ae0, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/22a88c5577cf40a8a96c503faf72b058, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/6ebaa8ddcdcb49f7b8b2b877165a5485, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/0e7070d678e04397a2aec6ef4c06ef97, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/859aa1d0425b47f282155ccac8361487, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/eaa4307fe1b4426ab069cc866d2767af, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/771de5fdfa324f609ae3701dc3976c63, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/f8b3f961510745849177b7cb8dd3f2c4, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/9f7de479b198491aa840257e569eb88d, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/d62aec33984a4aa697d42547013b2f76, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/521ce036d4504199b5fccc3484616015, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/3b23928c3a8644bb93070803a69fa66a, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/f8b76100eb04457b8b2d0cec14a2b754, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/9d73303dbb604082ada96dad2af345a5, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/a4031f66674446b5b8dc5241f0e71007, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/a6af1d0f34b94864bea4aeeeac5dd2ab, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/036e360aa7984e0488aec650789900e1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/95bf9171e25a4633b81c5856bc315787, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/34aa5c1556ac441792448d6c8a2ca30b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/cf7aad5bb9a848cbb892ad7f21f95ad1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/93df8975d1f74878873416d215ea8ffb, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/098f275a03a54a2dacd774722744b9d8, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/b3aab0c6cadd4a4687361d4306c9037e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/84cd0f1b4aa8496cb6b60a67562a08fb, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/8dfd934c4bab41adb513a1c2181127eb] to archive 2024-11-20T19:26:39,391 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
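Note: every HFileArchiver move recorded above and in the entries that follow uses the same pattern: the store file keeps its region, family, and file name, and only the root is rewritten from .../data/... to .../archive/data/.... A small hypothetical helper illustrating that mapping is sketched here; the class and method names are made up for illustration and are not HBase's internal archiver API.

import org.apache.hadoop.fs.Path;

// Illustrative only: mirrors the data/ -> archive/data/ moves seen in this log.
public final class ArchivePathSketch {
  private ArchivePathSketch() {
  }

  // Maps <root>/data/<namespace>/<table>/<region>/<family>/<file>
  // to <root>/archive/data/<namespace>/<table>/<region>/<family>/<file>.
  // Assumes the first "/data/" path component is the HBase data root,
  // which holds for the paths in this log.
  public static Path toArchivePath(Path storeFile) {
    String uri = storeFile.toString();
    int idx = uri.indexOf("/data/");
    if (idx < 0) {
      throw new IllegalArgumentException("Not a store-file path: " + uri);
    }
    return new Path(uri.substring(0, idx) + "/archive" + uri.substring(idx));
  }
}

For example, applied to the B/1df6287842de4424b09fa696f0585378 path in the next entry, this produces the archive/data/default/TestAcidGuarantees destination that the log reports.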
2024-11-20T19:26:39,392 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/1df6287842de4424b09fa696f0585378 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/1df6287842de4424b09fa696f0585378 2024-11-20T19:26:39,393 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/c45b69bedd754053a26d5cd4bcec490e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/c45b69bedd754053a26d5cd4bcec490e 2024-11-20T19:26:39,394 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/8d85a89313aa4c5fa5833ff59cb8d59c to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/8d85a89313aa4c5fa5833ff59cb8d59c 2024-11-20T19:26:39,394 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/d57f19803e32443da90ad9c0fe911e7e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/d57f19803e32443da90ad9c0fe911e7e 2024-11-20T19:26:39,395 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/fcc4143c6ff34710ab09ff7f9d8dfeff to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/fcc4143c6ff34710ab09ff7f9d8dfeff 2024-11-20T19:26:39,396 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/9062e57557ee455da326d6306667bcb1 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/9062e57557ee455da326d6306667bcb1 2024-11-20T19:26:39,397 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/35200128acc5479d888a78e90595bc81 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/35200128acc5479d888a78e90595bc81 2024-11-20T19:26:39,398 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/9645a22b840e4e52bf33a612e39f2594 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/9645a22b840e4e52bf33a612e39f2594 2024-11-20T19:26:39,399 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/d08f51c1cf2d41ebaacc6fccabc64a19 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/d08f51c1cf2d41ebaacc6fccabc64a19 2024-11-20T19:26:39,399 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/3ec827e7b5d34c66985754995de9744c to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/3ec827e7b5d34c66985754995de9744c 2024-11-20T19:26:39,400 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/104771bdc12844e388f6de6452e85ae0 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/104771bdc12844e388f6de6452e85ae0 2024-11-20T19:26:39,401 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/22a88c5577cf40a8a96c503faf72b058 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/22a88c5577cf40a8a96c503faf72b058 2024-11-20T19:26:39,401 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/6ebaa8ddcdcb49f7b8b2b877165a5485 to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/6ebaa8ddcdcb49f7b8b2b877165a5485 2024-11-20T19:26:39,402 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/0e7070d678e04397a2aec6ef4c06ef97 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/0e7070d678e04397a2aec6ef4c06ef97 2024-11-20T19:26:39,403 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/859aa1d0425b47f282155ccac8361487 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/859aa1d0425b47f282155ccac8361487 2024-11-20T19:26:39,404 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/eaa4307fe1b4426ab069cc866d2767af to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/eaa4307fe1b4426ab069cc866d2767af 2024-11-20T19:26:39,404 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/771de5fdfa324f609ae3701dc3976c63 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/771de5fdfa324f609ae3701dc3976c63 2024-11-20T19:26:39,405 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/f8b3f961510745849177b7cb8dd3f2c4 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/f8b3f961510745849177b7cb8dd3f2c4 2024-11-20T19:26:39,407 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/9f7de479b198491aa840257e569eb88d to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/9f7de479b198491aa840257e569eb88d 2024-11-20T19:26:39,409 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/d62aec33984a4aa697d42547013b2f76 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/d62aec33984a4aa697d42547013b2f76 2024-11-20T19:26:39,410 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/521ce036d4504199b5fccc3484616015 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/521ce036d4504199b5fccc3484616015 2024-11-20T19:26:39,411 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/3b23928c3a8644bb93070803a69fa66a to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/3b23928c3a8644bb93070803a69fa66a 2024-11-20T19:26:39,414 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/f8b76100eb04457b8b2d0cec14a2b754 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/f8b76100eb04457b8b2d0cec14a2b754 2024-11-20T19:26:39,416 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/9d73303dbb604082ada96dad2af345a5 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/9d73303dbb604082ada96dad2af345a5 2024-11-20T19:26:39,418 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/a4031f66674446b5b8dc5241f0e71007 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/a4031f66674446b5b8dc5241f0e71007 2024-11-20T19:26:39,420 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/a6af1d0f34b94864bea4aeeeac5dd2ab to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/a6af1d0f34b94864bea4aeeeac5dd2ab 2024-11-20T19:26:39,421 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/036e360aa7984e0488aec650789900e1 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/036e360aa7984e0488aec650789900e1 2024-11-20T19:26:39,422 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/95bf9171e25a4633b81c5856bc315787 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/95bf9171e25a4633b81c5856bc315787 2024-11-20T19:26:39,423 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/34aa5c1556ac441792448d6c8a2ca30b to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/34aa5c1556ac441792448d6c8a2ca30b 2024-11-20T19:26:39,424 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/cf7aad5bb9a848cbb892ad7f21f95ad1 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/cf7aad5bb9a848cbb892ad7f21f95ad1 2024-11-20T19:26:39,426 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/93df8975d1f74878873416d215ea8ffb to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/93df8975d1f74878873416d215ea8ffb 2024-11-20T19:26:39,428 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/098f275a03a54a2dacd774722744b9d8 to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/098f275a03a54a2dacd774722744b9d8 2024-11-20T19:26:39,429 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/b3aab0c6cadd4a4687361d4306c9037e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/b3aab0c6cadd4a4687361d4306c9037e 2024-11-20T19:26:39,430 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/84cd0f1b4aa8496cb6b60a67562a08fb to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/84cd0f1b4aa8496cb6b60a67562a08fb 2024-11-20T19:26:39,430 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/8dfd934c4bab41adb513a1c2181127eb to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/8dfd934c4bab41adb513a1c2181127eb 2024-11-20T19:26:39,432 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/2efa636ae6374fb09125f6c0cee4c043, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/eb39484812db4d3788c67ad4444e3ec9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/3df0d0fcf1ec4b149265bc06941420c5, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/48f404cd23074ccdbf4930978eba2413, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/06109dcd480c47dcbfb90aafef197082, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/ac3b18977e6a4f8d83845725f2a3dfc4, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/831e02577f0c4c339c0cea91928c2e28, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/616043a3c58a47a99e18cc6fd12e3da8, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/7debdfbdb7f34ac687e38e4add5718d1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/dfb36e475de941a095b1c4232981491d, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/348c11a811b745579d0ff199fb51a8e9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/3e3e32b7c0d24f568863926738fbf811, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/37033dce86c7444fadef875616f567ae, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/4d1a17f064d6450495af135342cc4b4b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/38b2e96b33644fc6a324004d6e1cfd26, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/cc0376dc5ec745e3ada5707afdbd648d, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/8dd76d7c1206448d9872883fad0a5bb6, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/81ee5a704de24112a2a8ee8a42593956, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/e3db49004fe34b4e8117b4e227913655, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/d02d9655b94d45598f54fd0a2eb16d36, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/f4857abdd58842dc82a26e88c6e82128, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/ac6ca5ee7e0f4fb59436f6b981aea5f6, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/d1d76191638c4b0a9119265468259da2, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/7b9edf707cc34aba868c888c821c1d8c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/816bd082a742439991ad2b942da80daf, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/b6152c2e714f4217a11dd4ca88c6e35b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/22c04aceecd54787b7caa0b96b01636b, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/23f48bd8a4604ccb84ce2864aebb977f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/22112d663b4545e1b7d3c4ad068d8cca, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/03dc031a2ac3450fa70d4cd2c0ea4f23, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/b8f7a4f92cf145b1b8474ebe4a8fbd39, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/d589efc2012a4527b53d1cd4efd77dc9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/eb3afcb1f3c5428d8e2d76530b255c5b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/84915af15c784a5490307b16df6359cf, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/71c6c74f26ac48b098aaefff74b09b51] to archive 2024-11-20T19:26:39,433 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T19:26:39,435 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/2efa636ae6374fb09125f6c0cee4c043 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/2efa636ae6374fb09125f6c0cee4c043 2024-11-20T19:26:39,436 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/eb39484812db4d3788c67ad4444e3ec9 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/eb39484812db4d3788c67ad4444e3ec9 2024-11-20T19:26:39,437 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/3df0d0fcf1ec4b149265bc06941420c5 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/3df0d0fcf1ec4b149265bc06941420c5 2024-11-20T19:26:39,438 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/48f404cd23074ccdbf4930978eba2413 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/48f404cd23074ccdbf4930978eba2413 2024-11-20T19:26:39,439 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/06109dcd480c47dcbfb90aafef197082 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/06109dcd480c47dcbfb90aafef197082 2024-11-20T19:26:39,440 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/ac3b18977e6a4f8d83845725f2a3dfc4 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/ac3b18977e6a4f8d83845725f2a3dfc4 2024-11-20T19:26:39,441 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/831e02577f0c4c339c0cea91928c2e28 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/831e02577f0c4c339c0cea91928c2e28 2024-11-20T19:26:39,441 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/616043a3c58a47a99e18cc6fd12e3da8 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/616043a3c58a47a99e18cc6fd12e3da8 2024-11-20T19:26:39,442 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/7debdfbdb7f34ac687e38e4add5718d1 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/7debdfbdb7f34ac687e38e4add5718d1 2024-11-20T19:26:39,443 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/dfb36e475de941a095b1c4232981491d to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/dfb36e475de941a095b1c4232981491d 2024-11-20T19:26:39,445 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/348c11a811b745579d0ff199fb51a8e9 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/348c11a811b745579d0ff199fb51a8e9 2024-11-20T19:26:39,447 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/3e3e32b7c0d24f568863926738fbf811 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/3e3e32b7c0d24f568863926738fbf811 2024-11-20T19:26:39,448 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/37033dce86c7444fadef875616f567ae to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/37033dce86c7444fadef875616f567ae 2024-11-20T19:26:39,449 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/4d1a17f064d6450495af135342cc4b4b to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/4d1a17f064d6450495af135342cc4b4b 2024-11-20T19:26:39,450 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/38b2e96b33644fc6a324004d6e1cfd26 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/38b2e96b33644fc6a324004d6e1cfd26 2024-11-20T19:26:39,450 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/cc0376dc5ec745e3ada5707afdbd648d to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/cc0376dc5ec745e3ada5707afdbd648d 2024-11-20T19:26:39,451 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/8dd76d7c1206448d9872883fad0a5bb6 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/8dd76d7c1206448d9872883fad0a5bb6 2024-11-20T19:26:39,452 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/81ee5a704de24112a2a8ee8a42593956 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/81ee5a704de24112a2a8ee8a42593956 2024-11-20T19:26:39,453 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/e3db49004fe34b4e8117b4e227913655 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/e3db49004fe34b4e8117b4e227913655 2024-11-20T19:26:39,454 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/d02d9655b94d45598f54fd0a2eb16d36 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/d02d9655b94d45598f54fd0a2eb16d36 2024-11-20T19:26:39,456 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/f4857abdd58842dc82a26e88c6e82128 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/f4857abdd58842dc82a26e88c6e82128 2024-11-20T19:26:39,456 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/ac6ca5ee7e0f4fb59436f6b981aea5f6 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/ac6ca5ee7e0f4fb59436f6b981aea5f6 2024-11-20T19:26:39,457 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/d1d76191638c4b0a9119265468259da2 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/d1d76191638c4b0a9119265468259da2 2024-11-20T19:26:39,458 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/7b9edf707cc34aba868c888c821c1d8c to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/7b9edf707cc34aba868c888c821c1d8c 2024-11-20T19:26:39,459 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/816bd082a742439991ad2b942da80daf to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/816bd082a742439991ad2b942da80daf 2024-11-20T19:26:39,459 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/b6152c2e714f4217a11dd4ca88c6e35b to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/b6152c2e714f4217a11dd4ca88c6e35b 2024-11-20T19:26:39,460 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/22c04aceecd54787b7caa0b96b01636b to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/22c04aceecd54787b7caa0b96b01636b 2024-11-20T19:26:39,461 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/23f48bd8a4604ccb84ce2864aebb977f to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/23f48bd8a4604ccb84ce2864aebb977f 2024-11-20T19:26:39,463 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/22112d663b4545e1b7d3c4ad068d8cca to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/22112d663b4545e1b7d3c4ad068d8cca 2024-11-20T19:26:39,464 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/03dc031a2ac3450fa70d4cd2c0ea4f23 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/03dc031a2ac3450fa70d4cd2c0ea4f23 2024-11-20T19:26:39,465 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/b8f7a4f92cf145b1b8474ebe4a8fbd39 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/b8f7a4f92cf145b1b8474ebe4a8fbd39 2024-11-20T19:26:39,466 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/d589efc2012a4527b53d1cd4efd77dc9 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/d589efc2012a4527b53d1cd4efd77dc9 2024-11-20T19:26:39,466 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/eb3afcb1f3c5428d8e2d76530b255c5b to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/eb3afcb1f3c5428d8e2d76530b255c5b 2024-11-20T19:26:39,467 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/84915af15c784a5490307b16df6359cf to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/84915af15c784a5490307b16df6359cf 2024-11-20T19:26:39,468 DEBUG [StoreCloser-TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/71c6c74f26ac48b098aaefff74b09b51 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/71c6c74f26ac48b098aaefff74b09b51 2024-11-20T19:26:39,475 DEBUG 
[RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/recovered.edits/536.seqid, newMaxSeqId=536, maxSeqId=1 2024-11-20T19:26:39,475 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636. 2024-11-20T19:26:39,475 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1635): Region close journal for eff3b3afe40ac2d93c9d770f2a159636: 2024-11-20T19:26:39,477 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] handler.UnassignRegionHandler(170): Closed eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:39,477 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=89 updating hbase:meta row=eff3b3afe40ac2d93c9d770f2a159636, regionState=CLOSED 2024-11-20T19:26:39,480 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-11-20T19:26:39,481 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; CloseRegionProcedure eff3b3afe40ac2d93c9d770f2a159636, server=db9c3a6c6492,41229,1732130701496 in 1.5610 sec 2024-11-20T19:26:39,482 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=89, resume processing ppid=88 2024-11-20T19:26:39,482 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, ppid=88, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=eff3b3afe40ac2d93c9d770f2a159636, UNASSIGN in 1.5660 sec 2024-11-20T19:26:39,483 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-11-20T19:26:39,483 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5680 sec 2024-11-20T19:26:39,484 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130799484"}]},"ts":"1732130799484"} 2024-11-20T19:26:39,485 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T19:26:39,497 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T19:26:39,499 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6360 sec 2024-11-20T19:26:39,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T19:26:39,968 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-11-20T19:26:39,968 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T19:26:39,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; 
DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:39,970 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=91, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:39,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-20T19:26:39,970 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=91, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:39,971 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:39,973 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A, FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B, FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C, FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/recovered.edits] 2024-11-20T19:26:39,975 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/12906b135a4848fca7812cf3662db9ea to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/12906b135a4848fca7812cf3662db9ea 2024-11-20T19:26:39,976 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/5b439f6c0cd547848054b85b1932b679 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/5b439f6c0cd547848054b85b1932b679 2024-11-20T19:26:39,976 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/d109384f24af4f6f9b29444e68f7585e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/A/d109384f24af4f6f9b29444e68f7585e 2024-11-20T19:26:39,978 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/17449dea4edc4bf78be7d6a2562d87bf to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/17449dea4edc4bf78be7d6a2562d87bf 2024-11-20T19:26:39,979 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/952f1e04f78449feb68fe66fec0d3b47 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/952f1e04f78449feb68fe66fec0d3b47 2024-11-20T19:26:39,980 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/a4888101b5934569a6e43dc48825d11a to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/B/a4888101b5934569a6e43dc48825d11a 2024-11-20T19:26:39,982 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/28c9a96d4997451ca9903eba53d5b6f9 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/28c9a96d4997451ca9903eba53d5b6f9 2024-11-20T19:26:39,983 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/8a225c643eab491797b1c74dbe7fa969 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/8a225c643eab491797b1c74dbe7fa969 2024-11-20T19:26:39,984 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/9502dd2ba584490ca95a88e467ccd367 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/C/9502dd2ba584490ca95a88e467ccd367 2024-11-20T19:26:39,986 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/recovered.edits/536.seqid to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636/recovered.edits/536.seqid 2024-11-20T19:26:39,987 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/eff3b3afe40ac2d93c9d770f2a159636 2024-11-20T19:26:39,987 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T19:26:39,988 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=91, 
state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:39,990 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T19:26:39,991 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T19:26:39,992 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=91, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:39,992 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T19:26:39,992 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732130799992"}]},"ts":"9223372036854775807"} 2024-11-20T19:26:39,994 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T19:26:39,994 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => eff3b3afe40ac2d93c9d770f2a159636, NAME => 'TestAcidGuarantees,,1732130772115.eff3b3afe40ac2d93c9d770f2a159636.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T19:26:39,994 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T19:26:39,994 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732130799994"}]},"ts":"9223372036854775807"} 2024-11-20T19:26:39,996 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T19:26:40,039 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=91, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:40,040 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 71 msec 2024-11-20T19:26:40,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-20T19:26:40,071 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-11-20T19:26:40,081 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=239 (was 237) - Thread LEAK? -, OpenFileDescriptor=452 (was 451) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=646 (was 647), ProcessCount=11 (was 11), AvailableMemoryMB=3320 (was 3477) 2024-11-20T19:26:40,090 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=239, OpenFileDescriptor=452, MaxFileDescriptor=1048576, SystemLoadAverage=646, ProcessCount=11, AvailableMemoryMB=3320 2024-11-20T19:26:40,091 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
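
The sequence traced above, DisableTableProcedure (pid=87) followed by DeleteTableProcedure (pid=91) for default:TestAcidGuarantees, is what the master runs when the client (jenkins//172.17.0.2) issues DISABLE and then DELETE. A minimal sketch of driving the same teardown through the public Admin API, assuming an already-open Connection; the helper name dropTable is illustrative and not taken from the test:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    // Tear the table down the way the log above shows: disable first, then delete.
    // Each call blocks until the corresponding master procedure completes, which is
    // why the client keeps polling "Checking to see if procedure is done".
    static void dropTable(Connection connection, String name) throws java.io.IOException {
        TableName table = TableName.valueOf(name);   // e.g. "TestAcidGuarantees"
        try (Admin admin = connection.getAdmin()) {
            if (admin.tableExists(table)) {
                admin.disableTable(table);           // master runs a DisableTableProcedure
                admin.deleteTable(table);            // master runs a DeleteTableProcedure
            }
        }
    }
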
2024-11-20T19:26:40,092 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T19:26:40,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=92, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:40,093 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T19:26:40,093 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:40,093 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 92 2024-11-20T19:26:40,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-11-20T19:26:40,094 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T19:26:40,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742200_1376 (size=960) 2024-11-20T19:26:40,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-11-20T19:26:40,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-11-20T19:26:40,500 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203 2024-11-20T19:26:40,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742201_1377 (size=53) 2024-11-20T19:26:40,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-11-20T19:26:40,905 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:26:40,905 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 2d2917374eb6d0879b57e7f84a3c009e, disabling compactions & flushes 2024-11-20T19:26:40,905 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:40,905 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:40,905 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. after waiting 0 ms 2024-11-20T19:26:40,905 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:40,905 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
2024-11-20T19:26:40,905 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:40,906 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T19:26:40,906 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732130800906"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732130800906"}]},"ts":"1732130800906"} 2024-11-20T19:26:40,907 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T19:26:40,908 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T19:26:40,908 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130800908"}]},"ts":"1732130800908"} 2024-11-20T19:26:40,908 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T19:26:40,955 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2d2917374eb6d0879b57e7f84a3c009e, ASSIGN}] 2024-11-20T19:26:40,956 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2d2917374eb6d0879b57e7f84a3c009e, ASSIGN 2024-11-20T19:26:40,956 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=2d2917374eb6d0879b57e7f84a3c009e, ASSIGN; state=OFFLINE, location=db9c3a6c6492,41229,1732130701496; forceNewPlan=false, retain=false 2024-11-20T19:26:41,107 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=2d2917374eb6d0879b57e7f84a3c009e, regionState=OPENING, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:41,108 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE; OpenRegionProcedure 2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496}] 2024-11-20T19:26:41,163 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T19:26:41,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-11-20T19:26:41,259 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:41,262 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] handler.AssignRegionHandler(135): 
Open TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:41,262 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(7285): Opening region: {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} 2024-11-20T19:26:41,262 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:41,262 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:26:41,262 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(7327): checking encryption for 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:41,263 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(7330): checking classloading for 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:41,267 INFO [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:41,268 INFO [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:26:41,269 INFO [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2d2917374eb6d0879b57e7f84a3c009e columnFamilyName A 2024-11-20T19:26:41,269 DEBUG [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:41,269 INFO [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] regionserver.HStore(327): Store=2d2917374eb6d0879b57e7f84a3c009e/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:26:41,269 INFO [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:41,270 INFO [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:26:41,270 INFO [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2d2917374eb6d0879b57e7f84a3c009e columnFamilyName B 2024-11-20T19:26:41,270 DEBUG [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:41,271 INFO [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] regionserver.HStore(327): Store=2d2917374eb6d0879b57e7f84a3c009e/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:26:41,271 INFO [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:41,271 INFO [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:26:41,271 INFO [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2d2917374eb6d0879b57e7f84a3c009e columnFamilyName C 2024-11-20T19:26:41,272 DEBUG [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:41,272 INFO [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] regionserver.HStore(327): 
Store=2d2917374eb6d0879b57e7f84a3c009e/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:26:41,272 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:41,273 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:41,273 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:41,274 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T19:26:41,275 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(1085): writing seq id for 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:41,276 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T19:26:41,277 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(1102): Opened 2d2917374eb6d0879b57e7f84a3c009e; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70861821, jitterRate=0.05592341721057892}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T19:26:41,277 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(1001): Region open journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:41,278 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., pid=94, masterSystemTime=1732130801259 2024-11-20T19:26:41,279 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:41,279 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
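The stores opened above (A, B and C) are all backed by CompactingMemStore with the BASIC in-memory compaction policy, which the table descriptor carries as the attribute 'hbase.hregion.compacting.memstore.type' (visible later in the modify-table dump). A minimal, hedged sketch of declaring such a table through the HBase 2.x Admin API; the class name and connection setup are illustrative and not taken from the test harness:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateCompactingMemStoreTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // Table-level attribute seen in the descriptor dump; it makes every store
              // use CompactingMemStore with the BASIC in-memory compaction policy.
              .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1) // VERSIONS => '1' in the descriptor above
            .build());
      }
      admin.createTable(table.build());
    }
  }
}

The equivalent per-family route is ColumnFamilyDescriptorBuilder.setInMemoryCompaction(MemoryCompactionPolicy.BASIC); either way the region server reports it through the CompactingMemStore(122) lines above (compactor=BASIC, pipelineThreshold=2).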
2024-11-20T19:26:41,279 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=2d2917374eb6d0879b57e7f84a3c009e, regionState=OPEN, openSeqNum=2, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:41,281 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-11-20T19:26:41,281 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; OpenRegionProcedure 2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 in 172 msec 2024-11-20T19:26:41,282 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92 2024-11-20T19:26:41,282 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=2d2917374eb6d0879b57e7f84a3c009e, ASSIGN in 326 msec 2024-11-20T19:26:41,282 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T19:26:41,283 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130801282"}]},"ts":"1732130801282"} 2024-11-20T19:26:41,283 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T19:26:41,322 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T19:26:41,323 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2300 sec 2024-11-20T19:26:42,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-11-20T19:26:42,198 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 92 completed 2024-11-20T19:26:42,200 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x68ad882f to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6f5b2180 2024-11-20T19:26:42,210 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34becda3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:42,211 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:42,212 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48666, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:42,213 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T19:26:42,214 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44772, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T19:26:42,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T19:26:42,215 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T19:26:42,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=95, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:42,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742202_1378 (size=996) 2024-11-20T19:26:42,626 DEBUG [PEWorker-4 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-20T19:26:42,626 INFO [PEWorker-4 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-20T19:26:42,636 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T19:26:42,641 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2d2917374eb6d0879b57e7f84a3c009e, REOPEN/MOVE}] 2024-11-20T19:26:42,642 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2d2917374eb6d0879b57e7f84a3c009e, REOPEN/MOVE 2024-11-20T19:26:42,643 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=2d2917374eb6d0879b57e7f84a3c009e, regionState=CLOSING, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:42,644 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T19:26:42,645 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE; CloseRegionProcedure 2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496}] 2024-11-20T19:26:42,798 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:42,799 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] handler.UnassignRegionHandler(124): Close 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:42,799 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T19:26:42,799 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1681): Closing 2d2917374eb6d0879b57e7f84a3c009e, disabling compactions & flushes 2024-11-20T19:26:42,799 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:42,799 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:42,799 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. after waiting 0 ms 2024-11-20T19:26:42,799 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
2024-11-20T19:26:42,802 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-20T19:26:42,803 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:42,803 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1635): Region close journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:42,803 WARN [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegionServer(3786): Not adding moved region record: 2d2917374eb6d0879b57e7f84a3c009e to self. 2024-11-20T19:26:42,804 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] handler.UnassignRegionHandler(170): Closed 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:42,805 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=2d2917374eb6d0879b57e7f84a3c009e, regionState=CLOSED 2024-11-20T19:26:42,807 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-11-20T19:26:42,807 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; CloseRegionProcedure 2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 in 160 msec 2024-11-20T19:26:42,807 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=2d2917374eb6d0879b57e7f84a3c009e, REOPEN/MOVE; state=CLOSED, location=db9c3a6c6492,41229,1732130701496; forceNewPlan=false, retain=true 2024-11-20T19:26:42,957 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=2d2917374eb6d0879b57e7f84a3c009e, regionState=OPENING, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:42,959 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=97, state=RUNNABLE; OpenRegionProcedure 2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496}] 2024-11-20T19:26:43,110 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:43,112 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
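The close/reopen cycle around this point applies the modified descriptor from the modify-table request logged at 19:26:42,215, which turns column family A into a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4', so values larger than 4 bytes are written to MOB files). A hedged sketch of issuing that change through the Admin API, which is what drives the ModifyTableProcedure / ReopenTableRegionsProcedure chain seen here; the class name and connection setup are illustrative:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyASketch {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Fetch the current descriptor and rebuild family 'A' with MOB enabled.
      TableDescriptor current = admin.getDescriptor(name);
      ColumnFamilyDescriptor a = ColumnFamilyDescriptorBuilder
          .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
          .setMobEnabled(true)   // IS_MOB => 'true'
          .setMobThreshold(4L)   // MOB_THRESHOLD => '4'
          .build();
      TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
          .modifyColumnFamily(a)
          .build();
      // Kicks off the ModifyTableProcedure, which reopens the table's regions
      // so they pick up the new descriptor (the close/open sequence in this log).
      admin.modifyTable(modified);
    }
  }
}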
2024-11-20T19:26:43,112 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7285): Opening region: {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} 2024-11-20T19:26:43,113 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:43,113 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:26:43,113 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7327): checking encryption for 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:43,113 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7330): checking classloading for 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:43,114 INFO [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:43,115 INFO [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:26:43,115 INFO [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2d2917374eb6d0879b57e7f84a3c009e columnFamilyName A 2024-11-20T19:26:43,118 DEBUG [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:43,119 INFO [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] regionserver.HStore(327): Store=2d2917374eb6d0879b57e7f84a3c009e/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:26:43,120 INFO [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:43,121 INFO [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:26:43,121 INFO [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2d2917374eb6d0879b57e7f84a3c009e columnFamilyName B 2024-11-20T19:26:43,121 DEBUG [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:43,121 INFO [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] regionserver.HStore(327): Store=2d2917374eb6d0879b57e7f84a3c009e/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:26:43,121 INFO [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:43,122 INFO [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:26:43,122 INFO [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2d2917374eb6d0879b57e7f84a3c009e columnFamilyName C 2024-11-20T19:26:43,122 DEBUG [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:43,122 INFO [StoreOpener-2d2917374eb6d0879b57e7f84a3c009e-1 {}] regionserver.HStore(327): Store=2d2917374eb6d0879b57e7f84a3c009e/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:26:43,123 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:43,123 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:43,124 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:43,125 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T19:26:43,126 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1085): writing seq id for 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:43,127 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1102): Opened 2d2917374eb6d0879b57e7f84a3c009e; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64462540, jitterRate=-0.039433300495147705}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T19:26:43,128 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1001): Region open journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:43,128 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., pid=99, masterSystemTime=1732130803110 2024-11-20T19:26:43,129 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:43,129 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
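Once the region is reopened, the writer connections established below immediately push the deliberately tiny per-region memstore (MEMSTORE_FLUSHSIZE = 131072 bytes, flagged by the earlier TableDescriptorChecker WARN) past its blocking limit, producing the RegionTooBusyException entries that follow. The "Over memstore limit=512.0 K" figure is the flush size multiplied by hbase.hregion.memstore.block.multiplier (default 4), enforced in HRegion.checkResources() as shown in those stack traces. A small sketch of that arithmetic and of setting a per-table flush size; the values are taken from this log and the class name is illustrative:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class MemStoreBlockingLimitSketch {
  public static void main(String[] args) {
    long memstoreFlushSize = 131072L; // MEMSTORE_FLUSHSIZE / hbase.hregion.memstore.flush.size
    long blockMultiplier = 4L;        // hbase.hregion.memstore.block.multiplier (default)

    // HRegion.checkResources() rejects puts with RegionTooBusyException once the
    // region's memstore exceeds flushSize * blockMultiplier: 131072 * 4 = 524288 bytes,
    // i.e. the 512.0 K limit reported in the exceptions below.
    long blockingLimit = memstoreFlushSize * blockMultiplier;
    System.out.println("blocking limit = " + (blockingLimit / 1024) + " KB");

    // A per-table flush size (overriding the cluster-wide setting) can be declared
    // on the descriptor when the table is created or modified:
    TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setMemStoreFlushSize(memstoreFlushSize)
        .build();
  }
}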
2024-11-20T19:26:43,130 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=2d2917374eb6d0879b57e7f84a3c009e, regionState=OPEN, openSeqNum=5, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:43,132 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=97 2024-11-20T19:26:43,132 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=97, state=SUCCESS; OpenRegionProcedure 2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 in 172 msec 2024-11-20T19:26:43,133 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=96 2024-11-20T19:26:43,133 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=96, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=2d2917374eb6d0879b57e7f84a3c009e, REOPEN/MOVE in 491 msec 2024-11-20T19:26:43,134 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-11-20T19:26:43,134 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 497 msec 2024-11-20T19:26:43,136 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 919 msec 2024-11-20T19:26:43,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-20T19:26:43,138 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2b976e1a to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1df61dc9 2024-11-20T19:26:43,221 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fe71801, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:43,222 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b82ba2a to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3637e4c6 2024-11-20T19:26:43,235 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51f7d511, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:43,236 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7b6cf8cb to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72f422b4 2024-11-20T19:26:43,248 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1dc42ea6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:43,249 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ec15031 to 
127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2df33cdf 2024-11-20T19:26:43,264 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@117e86d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:43,265 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3dd5b441 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9f472e0 2024-11-20T19:26:43,285 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cd96549, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:43,286 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x167a78b0 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@31aea41b 2024-11-20T19:26:43,297 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3875c8c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:43,298 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5aee939b to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1e247aa1 2024-11-20T19:26:43,310 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@801ba40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:43,311 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f49665c to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2205f666 2024-11-20T19:26:43,322 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27539bdc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:43,323 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x683f8469 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6584e9ce 2024-11-20T19:26:43,335 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e3203d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:43,336 DEBUG [Time-limited test {}] 
zookeeper.ReadOnlyZKClient(149): Connect 0x75e4d3d0 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@37ec8e3b 2024-11-20T19:26:43,347 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@798e7fd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:43,360 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:43,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees 2024-11-20T19:26:43,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-20T19:26:43,362 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:43,363 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=100, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:43,363 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:43,371 DEBUG [hconnection-0xfa08ff4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:43,372 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48670, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:43,379 DEBUG [hconnection-0x56b8c267-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:43,380 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48684, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:43,387 DEBUG [hconnection-0x5a72223d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:43,388 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48694, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:43,389 DEBUG [hconnection-0x406e448-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:43,390 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48700, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:43,397 DEBUG [hconnection-0x59bba5e2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-11-20T19:26:43,398 DEBUG [hconnection-0x46ab931d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:43,398 DEBUG [hconnection-0x5f567b45-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:43,398 DEBUG [hconnection-0x1d9df028-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:43,399 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48716, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:43,400 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48720, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:43,400 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48714, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:43,400 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48728, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:43,407 DEBUG [hconnection-0x29409c61-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:43,408 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48734, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:43,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:43,416 DEBUG [hconnection-0x17a5f512-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:43,419 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2d2917374eb6d0879b57e7f84a3c009e 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T19:26:43,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=A 2024-11-20T19:26:43,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:43,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=B 2024-11-20T19:26:43,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:43,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=C 2024-11-20T19:26:43,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:43,420 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48744, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:43,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore 
size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130863439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:43,440 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130863440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:43,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130863444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:43,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130863444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:43,447 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130863443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:43,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-20T19:26:43,493 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112049e3106d9508447aa3bf4724ce755b9d_2d2917374eb6d0879b57e7f84a3c009e is 50, key is test_row_0/A:col10/1732130803378/Put/seqid=0 2024-11-20T19:26:43,514 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:43,514 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-20T19:26:43,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:43,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:43,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:43,515 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:43,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:43,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:43,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742203_1379 (size=9714) 2024-11-20T19:26:43,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130863541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:43,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130863541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:43,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130863545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:43,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130863546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:43,552 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130863548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:43,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-20T19:26:43,666 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:43,667 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-20T19:26:43,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:43,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:43,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:43,667 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:43,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:43,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:43,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130863743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:43,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130863744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:43,748 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130863747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:43,748 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130863748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:43,756 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130863755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:43,819 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:43,820 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-20T19:26:43,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:43,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:43,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:43,820 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:43,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:43,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:43,921 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:43,926 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112049e3106d9508447aa3bf4724ce755b9d_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112049e3106d9508447aa3bf4724ce755b9d_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:43,927 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/1380df6df04d4584aa5f0c0089e205c3, store: [table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:43,928 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/1380df6df04d4584aa5f0c0089e205c3 is 175, key is test_row_0/A:col10/1732130803378/Put/seqid=0 2024-11-20T19:26:43,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742204_1380 (size=22361) 2024-11-20T19:26:43,939 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/1380df6df04d4584aa5f0c0089e205c3 2024-11-20T19:26:43,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-20T19:26:43,972 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:43,972 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-20T19:26:43,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:43,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
as already flushing 2024-11-20T19:26:43,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:43,972 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:43,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:43,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:43,983 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/1b8e48ab2d6d4f3e9478691e23f70d87 is 50, key is test_row_0/B:col10/1732130803378/Put/seqid=0 2024-11-20T19:26:44,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742205_1381 (size=9657) 2024-11-20T19:26:44,014 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/1b8e48ab2d6d4f3e9478691e23f70d87 2024-11-20T19:26:44,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:44,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130864049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:44,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:44,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130864050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:44,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:44,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130864051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:44,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:44,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130864051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:44,062 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:44,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130864058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:44,070 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/023327f8b7a343f2a567c0f5dcdbea8b is 50, key is test_row_0/C:col10/1732130803378/Put/seqid=0 2024-11-20T19:26:44,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742206_1382 (size=9657) 2024-11-20T19:26:44,104 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/023327f8b7a343f2a567c0f5dcdbea8b 2024-11-20T19:26:44,108 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/1380df6df04d4584aa5f0c0089e205c3 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/1380df6df04d4584aa5f0c0089e205c3 2024-11-20T19:26:44,112 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/1380df6df04d4584aa5f0c0089e205c3, entries=100, sequenceid=17, filesize=21.8 K 2024-11-20T19:26:44,114 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/1b8e48ab2d6d4f3e9478691e23f70d87 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/1b8e48ab2d6d4f3e9478691e23f70d87 2024-11-20T19:26:44,118 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/1b8e48ab2d6d4f3e9478691e23f70d87, entries=100, sequenceid=17, filesize=9.4 K 2024-11-20T19:26:44,119 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/023327f8b7a343f2a567c0f5dcdbea8b as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/023327f8b7a343f2a567c0f5dcdbea8b 2024-11-20T19:26:44,124 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:44,125 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-20T19:26:44,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:44,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:44,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:44,125 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:44,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:44,126 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/023327f8b7a343f2a567c0f5dcdbea8b, entries=100, sequenceid=17, filesize=9.4 K 2024-11-20T19:26:44,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:44,127 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 2d2917374eb6d0879b57e7f84a3c009e in 709ms, sequenceid=17, compaction requested=false 2024-11-20T19:26:44,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:44,278 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:44,278 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-20T19:26:44,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:44,279 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2837): Flushing 2d2917374eb6d0879b57e7f84a3c009e 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T19:26:44,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=A 2024-11-20T19:26:44,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:44,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=B 2024-11-20T19:26:44,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:44,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=C 2024-11-20T19:26:44,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:44,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201ff7aa34698a4e8e85bf3503958acbf8_2d2917374eb6d0879b57e7f84a3c009e is 50, key is test_row_0/A:col10/1732130803438/Put/seqid=0 2024-11-20T19:26:44,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742207_1383 (size=12154) 2024-11-20T19:26:44,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:44,350 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201ff7aa34698a4e8e85bf3503958acbf8_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201ff7aa34698a4e8e85bf3503958acbf8_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:44,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/eec1fdeefa8746a7899668f9be5f8b08, store: [table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:44,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/eec1fdeefa8746a7899668f9be5f8b08 is 175, key is test_row_0/A:col10/1732130803438/Put/seqid=0 2024-11-20T19:26:44,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742208_1384 (size=30955) 2024-11-20T19:26:44,424 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/eec1fdeefa8746a7899668f9be5f8b08 2024-11-20T19:26:44,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/becadab61fbc4e05bd15f2c78138f99c is 50, key is test_row_0/B:col10/1732130803438/Put/seqid=0 2024-11-20T19:26:44,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-20T19:26:44,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36171 is added to blk_1073742209_1385 (size=12001) 2024-11-20T19:26:44,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:44,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:44,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:44,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130864567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:44,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:44,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130864567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:44,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:44,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130864573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:44,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:44,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130864574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:44,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:44,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130864576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:44,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:44,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130864678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:44,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:44,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130864678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:44,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:44,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130864684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:44,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:44,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130864688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:44,886 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:44,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130864881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:44,887 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:44,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130864883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:44,887 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/becadab61fbc4e05bd15f2c78138f99c 2024-11-20T19:26:44,894 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:44,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130864892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:44,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/c9f99c59da934351ac37f031f416cf05 is 50, key is test_row_0/C:col10/1732130803438/Put/seqid=0 2024-11-20T19:26:44,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:44,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130864897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:44,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742210_1386 (size=12001) 2024-11-20T19:26:45,039 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T19:26:45,192 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:45,192 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:45,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130865188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:45,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130865188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:45,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:45,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130865195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:45,214 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:45,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130865210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:45,341 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/c9f99c59da934351ac37f031f416cf05 2024-11-20T19:26:45,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/eec1fdeefa8746a7899668f9be5f8b08 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/eec1fdeefa8746a7899668f9be5f8b08 2024-11-20T19:26:45,354 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/eec1fdeefa8746a7899668f9be5f8b08, entries=150, sequenceid=40, filesize=30.2 K 2024-11-20T19:26:45,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/becadab61fbc4e05bd15f2c78138f99c as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/becadab61fbc4e05bd15f2c78138f99c 2024-11-20T19:26:45,362 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/becadab61fbc4e05bd15f2c78138f99c, entries=150, sequenceid=40, filesize=11.7 K 2024-11-20T19:26:45,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/c9f99c59da934351ac37f031f416cf05 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/c9f99c59da934351ac37f031f416cf05 2024-11-20T19:26:45,370 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/c9f99c59da934351ac37f031f416cf05, entries=150, sequenceid=40, filesize=11.7 K 2024-11-20T19:26:45,371 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 2d2917374eb6d0879b57e7f84a3c009e in 1092ms, sequenceid=40, compaction requested=false 2024-11-20T19:26:45,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2538): Flush status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:45,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:45,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=101 2024-11-20T19:26:45,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=101 2024-11-20T19:26:45,375 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-11-20T19:26:45,375 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0100 sec 2024-11-20T19:26:45,376 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees in 2.0160 sec 2024-11-20T19:26:45,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-20T19:26:45,468 INFO [Thread-1702 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 100 completed 2024-11-20T19:26:45,470 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:45,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees 2024-11-20T19:26:45,471 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
2024-11-20T19:26:45,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T19:26:45,472 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:45,472 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:45,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T19:26:45,589 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2d2917374eb6d0879b57e7f84a3c009e 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T19:26:45,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=A 2024-11-20T19:26:45,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:45,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=B 2024-11-20T19:26:45,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:45,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=C 2024-11-20T19:26:45,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:45,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:45,617 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202f2aac6a4c404783934e8d4a123e7931_2d2917374eb6d0879b57e7f84a3c009e is 50, key is test_row_0/A:col10/1732130804573/Put/seqid=0 2024-11-20T19:26:45,629 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:45,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T19:26:45,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:45,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
as already flushing 2024-11-20T19:26:45,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:45,629 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:45,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:45,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:45,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742211_1387 (size=14594) 2024-11-20T19:26:45,750 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:45,750 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:45,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130865723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:45,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130865731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:45,751 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:45,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130865732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:45,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:45,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130865732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:45,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:45,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130865730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:45,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T19:26:45,781 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:45,782 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T19:26:45,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:45,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:45,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:45,783 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:45,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:45,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:45,863 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:45,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130865852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:45,863 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:45,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130865852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:45,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:45,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130865852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:45,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:45,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130865852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:45,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:45,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130865863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:45,935 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:45,936 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T19:26:45,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:45,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:45,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:45,937 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:45,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:45,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:46,048 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,052 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202f2aac6a4c404783934e8d4a123e7931_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202f2aac6a4c404783934e8d4a123e7931_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:46,053 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/0da5d00675424532a3d043247070e915, store: [table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:46,053 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/0da5d00675424532a3d043247070e915 is 175, key is test_row_0/A:col10/1732130804573/Put/seqid=0 2024-11-20T19:26:46,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:46,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130866064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:46,072 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:46,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130866065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:46,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:46,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130866066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:46,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:46,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130866066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:46,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T19:26:46,078 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:46,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130866070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:46,089 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:46,089 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T19:26:46,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:46,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:46,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:46,089 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
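The RegionTooBusyException entries above come from HRegion.checkResources rejecting Mutate calls because the region's memstore has grown past its blocking limit (reported here as 512.0 K) while the flush started earlier is still in progress. As a rough illustration of what the failing client calls look like, the following Java sketch issues a Put and backs off on RegionTooBusyException; the row, qualifier and value are placeholders rather than values taken from this log, and the stock HBase client already retries this exception internally, so explicit handling like this is optional.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Placeholder row/value; family "A" matches the store named in the flush messages above.
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          int attempts = 0;
          while (true) {
            try {
              table.put(put); // rejected while the region's memstore is over its blocking limit
              break;
            } catch (RegionTooBusyException e) {
              if (++attempts >= 5) {
                throw e; // give up after a few attempts
              }
              Thread.sleep(100L * attempts); // back off so the pending flush can drain the memstore
            }
          }
        }
      }
    }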
2024-11-20T19:26:46,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:46,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:46,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742212_1388 (size=39549) 2024-11-20T19:26:46,243 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:46,243 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T19:26:46,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:46,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:46,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:46,244 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:46,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:46,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:46,378 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:46,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130866374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:46,385 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:46,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130866374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:46,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:46,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130866374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:46,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:46,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130866375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:46,390 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:46,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130866380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:46,396 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:46,397 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T19:26:46,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:46,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:46,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:46,397 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:46,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:46,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:46,508 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/0da5d00675424532a3d043247070e915 2024-11-20T19:26:46,534 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/7b6b06497af643bba08b12970f3841d1 is 50, key is test_row_0/B:col10/1732130804573/Put/seqid=0 2024-11-20T19:26:46,549 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:46,550 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T19:26:46,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:46,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:46,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:46,550 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:46,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:46,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:46,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742213_1389 (size=12001) 2024-11-20T19:26:46,566 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/7b6b06497af643bba08b12970f3841d1 2024-11-20T19:26:46,574 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/055b1c1c07034169822324e8fdf94fce is 50, key is test_row_0/C:col10/1732130804573/Put/seqid=0 2024-11-20T19:26:46,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T19:26:46,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742214_1390 (size=12001) 2024-11-20T19:26:46,600 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/055b1c1c07034169822324e8fdf94fce 2024-11-20T19:26:46,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/0da5d00675424532a3d043247070e915 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/0da5d00675424532a3d043247070e915 2024-11-20T19:26:46,623 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/0da5d00675424532a3d043247070e915, entries=200, sequenceid=54, filesize=38.6 K 2024-11-20T19:26:46,624 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/7b6b06497af643bba08b12970f3841d1 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/7b6b06497af643bba08b12970f3841d1 2024-11-20T19:26:46,629 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/7b6b06497af643bba08b12970f3841d1, entries=150, sequenceid=54, filesize=11.7 K 2024-11-20T19:26:46,629 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/055b1c1c07034169822324e8fdf94fce as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/055b1c1c07034169822324e8fdf94fce 2024-11-20T19:26:46,633 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/055b1c1c07034169822324e8fdf94fce, entries=150, sequenceid=54, filesize=11.7 K 2024-11-20T19:26:46,637 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 2d2917374eb6d0879b57e7f84a3c009e in 1047ms, sequenceid=54, compaction requested=true 2024-11-20T19:26:46,637 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:46,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d2917374eb6d0879b57e7f84a3c009e:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:46,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:46,637 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:46,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d2917374eb6d0879b57e7f84a3c009e:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:46,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:46,637 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:46,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d2917374eb6d0879b57e7f84a3c009e:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:46,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:46,639 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:46,639 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 2d2917374eb6d0879b57e7f84a3c009e/B is initiating minor compaction (all files) 2024-11-20T19:26:46,639 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d2917374eb6d0879b57e7f84a3c009e/B in TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
2024-11-20T19:26:46,639 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/1b8e48ab2d6d4f3e9478691e23f70d87, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/becadab61fbc4e05bd15f2c78138f99c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/7b6b06497af643bba08b12970f3841d1] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp, totalSize=32.9 K 2024-11-20T19:26:46,640 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b8e48ab2d6d4f3e9478691e23f70d87, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732130803378 2024-11-20T19:26:46,640 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:46,640 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 2d2917374eb6d0879b57e7f84a3c009e/A is initiating minor compaction (all files) 2024-11-20T19:26:46,640 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d2917374eb6d0879b57e7f84a3c009e/A in TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:46,640 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/1380df6df04d4584aa5f0c0089e205c3, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/eec1fdeefa8746a7899668f9be5f8b08, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/0da5d00675424532a3d043247070e915] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp, totalSize=90.7 K 2024-11-20T19:26:46,640 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:46,640 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
files: [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/1380df6df04d4584aa5f0c0089e205c3, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/eec1fdeefa8746a7899668f9be5f8b08, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/0da5d00675424532a3d043247070e915] 2024-11-20T19:26:46,640 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1380df6df04d4584aa5f0c0089e205c3, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732130803378 2024-11-20T19:26:46,640 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting becadab61fbc4e05bd15f2c78138f99c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732130803437 2024-11-20T19:26:46,641 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting eec1fdeefa8746a7899668f9be5f8b08, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732130803437 2024-11-20T19:26:46,641 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b6b06497af643bba08b12970f3841d1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732130804566 2024-11-20T19:26:46,641 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0da5d00675424532a3d043247070e915, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732130804566 2024-11-20T19:26:46,656 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:46,668 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d2917374eb6d0879b57e7f84a3c009e#B#compaction#331 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:46,669 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/5eb61774c2df4d8ea31bc3a501127448 is 50, key is test_row_0/B:col10/1732130804573/Put/seqid=0 2024-11-20T19:26:46,675 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411209c852cf4d96e4bdcacfe0394a8412882_2d2917374eb6d0879b57e7f84a3c009e store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:46,677 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411209c852cf4d96e4bdcacfe0394a8412882_2d2917374eb6d0879b57e7f84a3c009e, store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:46,677 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209c852cf4d96e4bdcacfe0394a8412882_2d2917374eb6d0879b57e7f84a3c009e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:46,703 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:46,703 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T19:26:46,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
2024-11-20T19:26:46,704 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2837): Flushing 2d2917374eb6d0879b57e7f84a3c009e 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T19:26:46,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=A 2024-11-20T19:26:46,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:46,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=B 2024-11-20T19:26:46,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:46,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=C 2024-11-20T19:26:46,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:46,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742215_1391 (size=12104) 2024-11-20T19:26:46,718 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/5eb61774c2df4d8ea31bc3a501127448 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/5eb61774c2df4d8ea31bc3a501127448 2024-11-20T19:26:46,725 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d2917374eb6d0879b57e7f84a3c009e/B of 2d2917374eb6d0879b57e7f84a3c009e into 5eb61774c2df4d8ea31bc3a501127448(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:46,725 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:46,725 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., storeName=2d2917374eb6d0879b57e7f84a3c009e/B, priority=13, startTime=1732130806637; duration=0sec 2024-11-20T19:26:46,726 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:46,726 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d2917374eb6d0879b57e7f84a3c009e:B 2024-11-20T19:26:46,727 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:46,727 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:46,727 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 2d2917374eb6d0879b57e7f84a3c009e/C is initiating minor compaction (all files) 2024-11-20T19:26:46,728 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d2917374eb6d0879b57e7f84a3c009e/C in TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:46,728 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/023327f8b7a343f2a567c0f5dcdbea8b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/c9f99c59da934351ac37f031f416cf05, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/055b1c1c07034169822324e8fdf94fce] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp, totalSize=32.9 K 2024-11-20T19:26:46,728 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 023327f8b7a343f2a567c0f5dcdbea8b, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732130803378 2024-11-20T19:26:46,728 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting c9f99c59da934351ac37f031f416cf05, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732130803437 2024-11-20T19:26:46,728 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 055b1c1c07034169822324e8fdf94fce, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732130804566 2024-11-20T19:26:46,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is 
added to blk_1073742216_1392 (size=4469) 2024-11-20T19:26:46,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200ade4a91261744adbd8221474c378884_2d2917374eb6d0879b57e7f84a3c009e is 50, key is test_row_0/A:col10/1732130805731/Put/seqid=0 2024-11-20T19:26:46,751 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d2917374eb6d0879b57e7f84a3c009e#C#compaction#333 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:46,751 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/427f1031e65d49f3ae1f24fcef32f122 is 50, key is test_row_0/C:col10/1732130804573/Put/seqid=0 2024-11-20T19:26:46,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742217_1393 (size=12154) 2024-11-20T19:26:46,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742218_1394 (size=12104) 2024-11-20T19:26:46,781 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200ade4a91261744adbd8221474c378884_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200ade4a91261744adbd8221474c378884_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:46,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/35075c8e4cfd423d9be80191ba0cba8e, store: [table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:46,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/35075c8e4cfd423d9be80191ba0cba8e is 175, key is test_row_0/A:col10/1732130805731/Put/seqid=0 2024-11-20T19:26:46,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742219_1395 (size=30955) 2024-11-20T19:26:46,819 INFO 
[RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=77, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/35075c8e4cfd423d9be80191ba0cba8e 2024-11-20T19:26:46,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/b48e0738db8e4e6a8a692f11da507306 is 50, key is test_row_0/B:col10/1732130805731/Put/seqid=0 2024-11-20T19:26:46,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742220_1396 (size=12001) 2024-11-20T19:26:46,886 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:46,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:46,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:46,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130866902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:46,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:46,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130866904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:46,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:46,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130866905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:46,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:46,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130866906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:46,917 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:46,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130866906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:47,012 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:47,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130867009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:47,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:47,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130867017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:47,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:47,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130867017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:47,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:47,030 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:47,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130867018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:47,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130867017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:47,142 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d2917374eb6d0879b57e7f84a3c009e#A#compaction#330 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:47,143 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/36012ac0601244cfb3b20020d1dbed60 is 175, key is test_row_0/A:col10/1732130804573/Put/seqid=0 2024-11-20T19:26:47,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742221_1397 (size=31058) 2024-11-20T19:26:47,186 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/427f1031e65d49f3ae1f24fcef32f122 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/427f1031e65d49f3ae1f24fcef32f122 2024-11-20T19:26:47,192 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d2917374eb6d0879b57e7f84a3c009e/C of 2d2917374eb6d0879b57e7f84a3c009e into 427f1031e65d49f3ae1f24fcef32f122(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
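
Note on the RegionTooBusyException bursts above: HRegion.checkResources rejects each incoming Mutate while the region's memstore sits above its blocking limit, reported here as 512.0 K, and callers are expected to back off until the in-flight flush drains the memstore. In stock HBase that blocking limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the standalone sketch below only illustrates that arithmetic, using hypothetical values that would yield the 512 K seen in this run, since the test's actual configuration is not visible in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreBlockingLimit {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Hypothetical values chosen only to reproduce the 512.0 K limit seen in the log;
            // the stock defaults are 128 MB and 4, which would block at 512 MB instead.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);   // flush threshold: 128 K
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // block writes at 4x that

            long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
            int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            long blockingLimit = flushSize * multiplier;                     // 512 K with the values above

            System.out.println("Puts fail with RegionTooBusyException once the region's memstore"
                    + " exceeds " + (blockingLimit / 1024) + " K, until a flush completes.");
        }
    }
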
2024-11-20T19:26:47,192 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:47,192 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., storeName=2d2917374eb6d0879b57e7f84a3c009e/C, priority=13, startTime=1732130806637; duration=0sec 2024-11-20T19:26:47,192 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:47,193 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d2917374eb6d0879b57e7f84a3c009e:C 2024-11-20T19:26:47,217 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:47,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130867214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:47,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:47,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130867232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:47,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:47,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130867232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:47,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:47,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130867233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:47,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:47,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130867233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:47,280 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/b48e0738db8e4e6a8a692f11da507306 2024-11-20T19:26:47,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/1b276b62c2ff4f44a835bd42d0187f17 is 50, key is test_row_0/C:col10/1732130805731/Put/seqid=0 2024-11-20T19:26:47,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742222_1398 (size=12001) 2024-11-20T19:26:47,526 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:47,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130867520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:47,544 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:47,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130867541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:47,545 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:47,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130867541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:47,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:47,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130867541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:47,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:47,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130867541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:47,561 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/36012ac0601244cfb3b20020d1dbed60 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/36012ac0601244cfb3b20020d1dbed60 2024-11-20T19:26:47,566 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d2917374eb6d0879b57e7f84a3c009e/A of 2d2917374eb6d0879b57e7f84a3c009e into 36012ac0601244cfb3b20020d1dbed60(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
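
Note on reading the ipc.CallRunner DEBUG entries above: each rejected Mutate is logged with its callId, payload size, client connection, and an absolute deadline in epoch milliseconds. Decoding one of those deadlines shows it falls 60 seconds after the call arrived, consistent with a 60-second RPC timeout (hbase.rpc.timeout) assuming the test does not override it. The tiny snippet below is not part of the test; it just converts the value copied from one of the entries earlier in this section.

    import java.time.Instant;

    public class DecodeCallDeadline {
        public static void main(String[] args) {
            // Value copied from a CallRunner DEBUG line above: "deadline: 1732130867017"
            long deadlineMillis = 1732130867017L;
            System.out.println(Instant.ofEpochMilli(deadlineMillis));
            // Prints 2024-11-20T19:27:47.017Z, i.e. 60 seconds after the call was received
            // at 19:26:47,017 -- the point at which this particular RPC expires if the
            // region is still blocked on its memstore limit.
        }
    }
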
2024-11-20T19:26:47,566 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:47,566 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., storeName=2d2917374eb6d0879b57e7f84a3c009e/A, priority=13, startTime=1732130806637; duration=0sec 2024-11-20T19:26:47,566 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:47,566 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d2917374eb6d0879b57e7f84a3c009e:A 2024-11-20T19:26:47,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T19:26:47,732 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/1b276b62c2ff4f44a835bd42d0187f17 2024-11-20T19:26:47,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/35075c8e4cfd423d9be80191ba0cba8e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/35075c8e4cfd423d9be80191ba0cba8e 2024-11-20T19:26:47,741 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/35075c8e4cfd423d9be80191ba0cba8e, entries=150, sequenceid=77, filesize=30.2 K 2024-11-20T19:26:47,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/b48e0738db8e4e6a8a692f11da507306 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/b48e0738db8e4e6a8a692f11da507306 2024-11-20T19:26:47,746 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/b48e0738db8e4e6a8a692f11da507306, entries=150, sequenceid=77, filesize=11.7 K 2024-11-20T19:26:47,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/1b276b62c2ff4f44a835bd42d0187f17 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/1b276b62c2ff4f44a835bd42d0187f17 2024-11-20T19:26:47,752 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/1b276b62c2ff4f44a835bd42d0187f17, entries=150, sequenceid=77, filesize=11.7 K 2024-11-20T19:26:47,753 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 2d2917374eb6d0879b57e7f84a3c009e in 1049ms, sequenceid=77, compaction requested=false 2024-11-20T19:26:47,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2538): Flush status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:47,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:47,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-11-20T19:26:47,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=103 2024-11-20T19:26:47,757 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-11-20T19:26:47,757 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2820 sec 2024-11-20T19:26:47,758 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees in 2.2870 sec 2024-11-20T19:26:48,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:48,033 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2d2917374eb6d0879b57e7f84a3c009e 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T19:26:48,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=A 2024-11-20T19:26:48,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:48,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=B 2024-11-20T19:26:48,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:48,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
2d2917374eb6d0879b57e7f84a3c009e, store=C 2024-11-20T19:26:48,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:48,042 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ea6933042f24452d9704048251f940e2_2d2917374eb6d0879b57e7f84a3c009e is 50, key is test_row_0/A:col10/1732130808032/Put/seqid=0 2024-11-20T19:26:48,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742223_1399 (size=14594) 2024-11-20T19:26:48,065 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:48,069 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ea6933042f24452d9704048251f940e2_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ea6933042f24452d9704048251f940e2_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:48,069 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/213196d5daa34471942c257e4d6b24ae, store: [table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:48,070 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/213196d5daa34471942c257e4d6b24ae is 175, key is test_row_0/A:col10/1732130808032/Put/seqid=0 2024-11-20T19:26:48,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130868083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:48,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130868093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:48,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130868094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:48,105 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130868095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:48,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130868095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:48,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742224_1400 (size=39549) 2024-11-20T19:26:48,203 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130868196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:48,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130868205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:48,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130868206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:48,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130868206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:48,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130868207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:48,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130868405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:48,416 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130868412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:48,420 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130868412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:48,422 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130868415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:48,423 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130868415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:48,519 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/213196d5daa34471942c257e4d6b24ae 2024-11-20T19:26:48,537 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/4d63be3616d244eea997bf78656e5626 is 50, key is test_row_0/B:col10/1732130808032/Put/seqid=0 2024-11-20T19:26:48,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742225_1401 (size=12001) 2024-11-20T19:26:48,718 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130868714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:48,726 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130868722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:48,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130868724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:48,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130868724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:48,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130868724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:48,985 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/4d63be3616d244eea997bf78656e5626 2024-11-20T19:26:49,016 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/ba045a0c5177444ca1c6b6b1c6e7edc7 is 50, key is test_row_0/C:col10/1732130808032/Put/seqid=0 2024-11-20T19:26:49,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742226_1402 (size=12001) 2024-11-20T19:26:49,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:49,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130869221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:49,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:49,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130869229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:49,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:49,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130869229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:49,238 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:49,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130869231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:49,238 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:49,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130869232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:49,455 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/ba045a0c5177444ca1c6b6b1c6e7edc7 2024-11-20T19:26:49,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/213196d5daa34471942c257e4d6b24ae as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/213196d5daa34471942c257e4d6b24ae 2024-11-20T19:26:49,467 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/213196d5daa34471942c257e4d6b24ae, entries=200, sequenceid=94, filesize=38.6 K 2024-11-20T19:26:49,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/4d63be3616d244eea997bf78656e5626 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/4d63be3616d244eea997bf78656e5626 2024-11-20T19:26:49,475 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/4d63be3616d244eea997bf78656e5626, entries=150, sequenceid=94, filesize=11.7 K 2024-11-20T19:26:49,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/ba045a0c5177444ca1c6b6b1c6e7edc7 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/ba045a0c5177444ca1c6b6b1c6e7edc7 2024-11-20T19:26:49,479 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/ba045a0c5177444ca1c6b6b1c6e7edc7, entries=150, sequenceid=94, filesize=11.7 K 2024-11-20T19:26:49,480 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 2d2917374eb6d0879b57e7f84a3c009e in 1448ms, sequenceid=94, compaction requested=true 2024-11-20T19:26:49,481 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:49,481 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:49,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d2917374eb6d0879b57e7f84a3c009e:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:49,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:49,481 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:49,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d2917374eb6d0879b57e7f84a3c009e:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:49,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:49,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d2917374eb6d0879b57e7f84a3c009e:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:49,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:49,482 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101562 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:49,482 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 2d2917374eb6d0879b57e7f84a3c009e/A is initiating minor 
compaction (all files) 2024-11-20T19:26:49,482 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d2917374eb6d0879b57e7f84a3c009e/A in TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:49,482 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/36012ac0601244cfb3b20020d1dbed60, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/35075c8e4cfd423d9be80191ba0cba8e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/213196d5daa34471942c257e4d6b24ae] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp, totalSize=99.2 K 2024-11-20T19:26:49,482 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:49,482 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. files: [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/36012ac0601244cfb3b20020d1dbed60, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/35075c8e4cfd423d9be80191ba0cba8e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/213196d5daa34471942c257e4d6b24ae] 2024-11-20T19:26:49,482 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:49,482 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 2d2917374eb6d0879b57e7f84a3c009e/B is initiating minor compaction (all files) 2024-11-20T19:26:49,482 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d2917374eb6d0879b57e7f84a3c009e/B in TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
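The repeated org.apache.hadoop.hbase.RegionTooBusyException entries above are the region server refusing writes in HRegion.checkResources() while the region's memstore is over its blocking limit (512.0 K in this run); the client is expected to back off and resend once MemStoreFlusher has drained the region. Below is a minimal, hypothetical client-side sketch of that contract, assuming a table and column layout like the one in this test (TestAcidGuarantees, family A, qualifier col10). The stock HBase client already retries these rejections internally (governed by hbase.client.retries.number and hbase.client.pause), and in practice the exception often surfaces only wrapped in a retries-exhausted error, so the explicit loop here is illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row/column names mirror the keys seen in the log ("test_row_0", family A, col10).
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;                 // start small, grow on each rejection
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);                   // may be rejected while the memstore is over its limit
          break;
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);          // give MemStoreFlusher time to drain the region
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}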
2024-11-20T19:26:49,482 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/5eb61774c2df4d8ea31bc3a501127448, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/b48e0738db8e4e6a8a692f11da507306, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/4d63be3616d244eea997bf78656e5626] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp, totalSize=35.3 K 2024-11-20T19:26:49,483 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 36012ac0601244cfb3b20020d1dbed60, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732130804566 2024-11-20T19:26:49,483 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 5eb61774c2df4d8ea31bc3a501127448, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732130804566 2024-11-20T19:26:49,483 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35075c8e4cfd423d9be80191ba0cba8e, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732130805696 2024-11-20T19:26:49,483 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting b48e0738db8e4e6a8a692f11da507306, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732130805696 2024-11-20T19:26:49,483 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 213196d5daa34471942c257e4d6b24ae, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732130806902 2024-11-20T19:26:49,483 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 4d63be3616d244eea997bf78656e5626, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732130806902 2024-11-20T19:26:49,490 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d2917374eb6d0879b57e7f84a3c009e#B#compaction#339 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:49,490 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/3bc2070f788d40e8b4fa723b1e500467 is 50, key is test_row_0/B:col10/1732130808032/Put/seqid=0 2024-11-20T19:26:49,491 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:49,502 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120a9e7e9adcab54bc3b6b6e71d660fb9a3_2d2917374eb6d0879b57e7f84a3c009e store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:49,504 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120a9e7e9adcab54bc3b6b6e71d660fb9a3_2d2917374eb6d0879b57e7f84a3c009e, store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:49,505 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a9e7e9adcab54bc3b6b6e71d660fb9a3_2d2917374eb6d0879b57e7f84a3c009e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:49,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742227_1403 (size=12207) 2024-11-20T19:26:49,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742228_1404 (size=4469) 2024-11-20T19:26:49,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T19:26:49,576 INFO [Thread-1702 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 102 completed 2024-11-20T19:26:49,577 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:49,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-11-20T19:26:49,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T19:26:49,579 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:49,580 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=104, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:49,580 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:49,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T19:26:49,737 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:49,738 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T19:26:49,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:49,739 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing 2d2917374eb6d0879b57e7f84a3c009e 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T19:26:49,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=A 2024-11-20T19:26:49,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:49,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=B 2024-11-20T19:26:49,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:49,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=C 2024-11-20T19:26:49,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:49,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203052fba14c3c49a3b05e0d71eb5e89f9_2d2917374eb6d0879b57e7f84a3c009e is 50, key is test_row_0/A:col10/1732130808094/Put/seqid=0 2024-11-20T19:26:49,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742229_1405 (size=12154) 2024-11-20T19:26:49,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:49,816 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203052fba14c3c49a3b05e0d71eb5e89f9_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203052fba14c3c49a3b05e0d71eb5e89f9_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:49,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/2d8d5855c64049b791c8d7eda9cd17d3, store: [table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:49,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/2d8d5855c64049b791c8d7eda9cd17d3 is 175, key is test_row_0/A:col10/1732130808094/Put/seqid=0 2024-11-20T19:26:49,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742230_1406 (size=30955) 2024-11-20T19:26:49,836 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=116, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/2d8d5855c64049b791c8d7eda9cd17d3 2024-11-20T19:26:49,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/3ed5bc2944d64b4886b7a450b1db23e5 is 50, key is test_row_0/B:col10/1732130808094/Put/seqid=0 2024-11-20T19:26:49,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T19:26:49,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742231_1407 (size=12001) 2024-11-20T19:26:49,936 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/3bc2070f788d40e8b4fa723b1e500467 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/3bc2070f788d40e8b4fa723b1e500467 2024-11-20T19:26:49,942 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] 
regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d2917374eb6d0879b57e7f84a3c009e/B of 2d2917374eb6d0879b57e7f84a3c009e into 3bc2070f788d40e8b4fa723b1e500467(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:49,942 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:49,942 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., storeName=2d2917374eb6d0879b57e7f84a3c009e/B, priority=13, startTime=1732130809481; duration=0sec 2024-11-20T19:26:49,942 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:49,942 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d2917374eb6d0879b57e7f84a3c009e:B 2024-11-20T19:26:49,942 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:49,943 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:49,943 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 2d2917374eb6d0879b57e7f84a3c009e/C is initiating minor compaction (all files) 2024-11-20T19:26:49,943 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d2917374eb6d0879b57e7f84a3c009e/C in TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
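The flush, write-blocking, and compaction behaviour recorded above is governed by a handful of standard HBase settings: the blocking memstore size behind the "Over memstore limit=512.0 K" rejections is the region flush size multiplied by hbase.hregion.memstore.block.multiplier, and the ExploringCompactionPolicy selections ("3 eligible, 16 blocking") correspond to hbase.hstore.compaction.min and hbase.hstore.blockingStoreFiles. The sketch below lists those keys with illustrative values only; the actual values used by this test run are not printed in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class FlushCompactionTuningExample {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();

    // Write blocking: checkResources() rejects puts once the region memstore exceeds
    // flush.size * block.multiplier. A 128 KB flush size with the default multiplier of 4
    // would give exactly the 512 K limit seen above, but that pairing is an assumption,
    // not something the log states.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);   // illustrative value
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    // Minor compaction selection: the ExploringCompactionPolicy lines above picked
    // 3 eligible store files with 16 blocking, which matches these defaults.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);

    // The PressureAwareThroughputController entries report a 50.00 MB/second ceiling;
    // that limit comes from the compaction throughput controller's bounds in hbase-site.xml.
    return conf;
  }
}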
2024-11-20T19:26:49,943 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/427f1031e65d49f3ae1f24fcef32f122, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/1b276b62c2ff4f44a835bd42d0187f17, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/ba045a0c5177444ca1c6b6b1c6e7edc7] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp, totalSize=35.3 K 2024-11-20T19:26:49,944 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 427f1031e65d49f3ae1f24fcef32f122, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732130804566 2024-11-20T19:26:49,944 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b276b62c2ff4f44a835bd42d0187f17, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732130805696 2024-11-20T19:26:49,944 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting ba045a0c5177444ca1c6b6b1c6e7edc7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732130806902 2024-11-20T19:26:49,953 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d2917374eb6d0879b57e7f84a3c009e#C#compaction#343 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:49,954 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/40b37fae8b014c3e854fe69010067b8a is 50, key is test_row_0/C:col10/1732130808032/Put/seqid=0 2024-11-20T19:26:49,955 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d2917374eb6d0879b57e7f84a3c009e#A#compaction#340 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:49,955 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/16dbd6bf78934995a3312222fcd2a3a7 is 175, key is test_row_0/A:col10/1732130808032/Put/seqid=0 2024-11-20T19:26:49,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742232_1408 (size=12207) 2024-11-20T19:26:49,999 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/40b37fae8b014c3e854fe69010067b8a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/40b37fae8b014c3e854fe69010067b8a 2024-11-20T19:26:50,006 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d2917374eb6d0879b57e7f84a3c009e/C of 2d2917374eb6d0879b57e7f84a3c009e into 40b37fae8b014c3e854fe69010067b8a(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:50,006 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:50,006 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., storeName=2d2917374eb6d0879b57e7f84a3c009e/C, priority=13, startTime=1732130809482; duration=0sec 2024-11-20T19:26:50,006 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:50,006 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d2917374eb6d0879b57e7f84a3c009e:C 2024-11-20T19:26:50,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742233_1409 (size=31161) 2024-11-20T19:26:50,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T19:26:50,247 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:50,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:50,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130870253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:50,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130870256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:50,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130870256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:50,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130870257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:50,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130870263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:50,299 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/3ed5bc2944d64b4886b7a450b1db23e5 2024-11-20T19:26:50,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/92fe7c9da68044c4b974cb06f8fb3ab9 is 50, key is test_row_0/C:col10/1732130808094/Put/seqid=0 2024-11-20T19:26:50,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742234_1410 (size=12001) 2024-11-20T19:26:50,316 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/92fe7c9da68044c4b974cb06f8fb3ab9 2024-11-20T19:26:50,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/2d8d5855c64049b791c8d7eda9cd17d3 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/2d8d5855c64049b791c8d7eda9cd17d3 2024-11-20T19:26:50,331 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/2d8d5855c64049b791c8d7eda9cd17d3, entries=150, sequenceid=116, filesize=30.2 K 2024-11-20T19:26:50,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/3ed5bc2944d64b4886b7a450b1db23e5 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/3ed5bc2944d64b4886b7a450b1db23e5 2024-11-20T19:26:50,340 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/3ed5bc2944d64b4886b7a450b1db23e5, entries=150, sequenceid=116, filesize=11.7 K 2024-11-20T19:26:50,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/92fe7c9da68044c4b974cb06f8fb3ab9 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/92fe7c9da68044c4b974cb06f8fb3ab9 2024-11-20T19:26:50,347 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/92fe7c9da68044c4b974cb06f8fb3ab9, entries=150, sequenceid=116, filesize=11.7 K 2024-11-20T19:26:50,349 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 2d2917374eb6d0879b57e7f84a3c009e in 610ms, sequenceid=116, compaction requested=false 2024-11-20T19:26:50,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:50,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
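The flush recorded above (pid=105, sequenceid=116) was driven from the test client through the master's flush-table path. As a minimal, hypothetical sketch of how such a flush is issued through the public HBase client API (the cluster connection and classpath configuration are assumptions, not shown in this log), the call boils down to:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushTestTable {
  public static void main(String[] args) throws Exception {
    // Assumed: hbase-site.xml on the classpath points at the test cluster.
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the master runs it
      // as a FlushTableProcedure / FlushRegionProcedure pair like pid=104/105 above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}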
2024-11-20T19:26:50,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-11-20T19:26:50,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-11-20T19:26:50,351 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-11-20T19:26:50,351 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 770 msec 2024-11-20T19:26:50,361 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 777 msec 2024-11-20T19:26:50,371 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2d2917374eb6d0879b57e7f84a3c009e 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T19:26:50,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=A 2024-11-20T19:26:50,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:50,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=B 2024-11-20T19:26:50,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:50,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=C 2024-11-20T19:26:50,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:50,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:50,385 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120acc1ef23cf5345ad9d65568fa33cf54d_2d2917374eb6d0879b57e7f84a3c009e is 50, key is test_row_0/A:col10/1732130810255/Put/seqid=0 2024-11-20T19:26:50,421 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130870408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:50,421 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130870418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:50,428 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130870419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:50,428 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130870419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:50,430 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130870422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:50,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742235_1411 (size=14794) 2024-11-20T19:26:50,432 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:50,434 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/16dbd6bf78934995a3312222fcd2a3a7 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/16dbd6bf78934995a3312222fcd2a3a7 2024-11-20T19:26:50,436 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120acc1ef23cf5345ad9d65568fa33cf54d_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120acc1ef23cf5345ad9d65568fa33cf54d_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:50,439 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/2086d1545c09469fac7a22f097a2ea9a, store: [table=TestAcidGuarantees family=A 
region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:50,440 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/2086d1545c09469fac7a22f097a2ea9a is 175, key is test_row_0/A:col10/1732130810255/Put/seqid=0 2024-11-20T19:26:50,443 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d2917374eb6d0879b57e7f84a3c009e/A of 2d2917374eb6d0879b57e7f84a3c009e into 16dbd6bf78934995a3312222fcd2a3a7(size=30.4 K), total size for store is 60.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:50,443 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:50,443 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., storeName=2d2917374eb6d0879b57e7f84a3c009e/A, priority=13, startTime=1732130809481; duration=0sec 2024-11-20T19:26:50,443 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:50,443 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d2917374eb6d0879b57e7f84a3c009e:A 2024-11-20T19:26:50,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742236_1412 (size=39749) 2024-11-20T19:26:50,453 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=133, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/2086d1545c09469fac7a22f097a2ea9a 2024-11-20T19:26:50,468 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/b035ecac4a214b189da2e314851a67a4 is 50, key is test_row_0/B:col10/1732130810255/Put/seqid=0 2024-11-20T19:26:50,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742237_1413 (size=12151) 2024-11-20T19:26:50,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130870522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:50,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130870523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:50,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130870529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:50,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130870530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:50,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130870531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:50,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T19:26:50,682 INFO [Thread-1702 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-11-20T19:26:50,684 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:50,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-11-20T19:26:50,687 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:50,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T19:26:50,688 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:50,688 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:50,736 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130870733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:50,736 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130870733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:50,737 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130870734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:50,737 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130870735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:50,737 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130870735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:50,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T19:26:50,840 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:50,840 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T19:26:50,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:50,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:50,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:50,841 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
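The repeated RegionTooBusyException warnings above mean writes are being rejected while the region's memstore is over its 512 K blocking limit and a flush is still in flight. The stock HBase client normally retries such rejections on its own; purely as a hypothetical illustration of an explicit application-side retry with backoff (the connection, row key, and cell values below are assumptions for the sketch, and the exact exception surfaced to application code depends on client retry settings):

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class PutWithBackoff {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);                 // may be rejected while the memstore is over its limit
          break;                          // write accepted
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) {
            throw e;                      // give up after a few attempts
          }
          Thread.sleep(200L * attempt);   // back off so the in-flight flush can drain the memstore
        }
      }
    }
  }
}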
2024-11-20T19:26:50,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:50,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:50,901 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/b035ecac4a214b189da2e314851a67a4 2024-11-20T19:26:50,920 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/5c6cb4832a34409498256571d50b040c is 50, key is test_row_0/C:col10/1732130810255/Put/seqid=0 2024-11-20T19:26:50,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742238_1414 (size=12151) 2024-11-20T19:26:50,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T19:26:50,993 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:50,993 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T19:26:50,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:50,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:50,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:50,994 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:50,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:50,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:51,041 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130871037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:51,041 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130871038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:51,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130871039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:51,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130871039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:51,048 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130871040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:51,146 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:51,146 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T19:26:51,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:51,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:51,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:51,147 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:51,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:51,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:51,163 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T19:26:51,163 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-20T19:26:51,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T19:26:51,298 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:51,299 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T19:26:51,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:51,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:51,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:51,299 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:51,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:51,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:51,333 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/5c6cb4832a34409498256571d50b040c 2024-11-20T19:26:51,343 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/2086d1545c09469fac7a22f097a2ea9a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/2086d1545c09469fac7a22f097a2ea9a 2024-11-20T19:26:51,350 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/2086d1545c09469fac7a22f097a2ea9a, entries=200, sequenceid=133, filesize=38.8 K 2024-11-20T19:26:51,352 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/b035ecac4a214b189da2e314851a67a4 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/b035ecac4a214b189da2e314851a67a4 2024-11-20T19:26:51,356 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/b035ecac4a214b189da2e314851a67a4, entries=150, sequenceid=133, filesize=11.9 K 2024-11-20T19:26:51,358 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/5c6cb4832a34409498256571d50b040c as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/5c6cb4832a34409498256571d50b040c 2024-11-20T19:26:51,364 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/5c6cb4832a34409498256571d50b040c, entries=150, sequenceid=133, filesize=11.9 K 2024-11-20T19:26:51,366 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 2d2917374eb6d0879b57e7f84a3c009e in 994ms, sequenceid=133, compaction requested=true 2024-11-20T19:26:51,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:51,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
2d2917374eb6d0879b57e7f84a3c009e:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:51,366 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:51,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:51,366 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:51,368 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d2917374eb6d0879b57e7f84a3c009e:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:51,368 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:51,369 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d2917374eb6d0879b57e7f84a3c009e:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:51,369 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:51,369 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:51,369 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:51,369 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 2d2917374eb6d0879b57e7f84a3c009e/A is initiating minor compaction (all files) 2024-11-20T19:26:51,369 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 2d2917374eb6d0879b57e7f84a3c009e/B is initiating minor compaction (all files) 2024-11-20T19:26:51,369 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d2917374eb6d0879b57e7f84a3c009e/B in TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:51,369 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d2917374eb6d0879b57e7f84a3c009e/A in TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
2024-11-20T19:26:51,369 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/3bc2070f788d40e8b4fa723b1e500467, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/3ed5bc2944d64b4886b7a450b1db23e5, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/b035ecac4a214b189da2e314851a67a4] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp, totalSize=35.5 K 2024-11-20T19:26:51,369 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/16dbd6bf78934995a3312222fcd2a3a7, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/2d8d5855c64049b791c8d7eda9cd17d3, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/2086d1545c09469fac7a22f097a2ea9a] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp, totalSize=99.5 K 2024-11-20T19:26:51,369 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:51,369 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
files: [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/16dbd6bf78934995a3312222fcd2a3a7, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/2d8d5855c64049b791c8d7eda9cd17d3, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/2086d1545c09469fac7a22f097a2ea9a] 2024-11-20T19:26:51,369 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 3bc2070f788d40e8b4fa723b1e500467, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732130806902 2024-11-20T19:26:51,370 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 16dbd6bf78934995a3312222fcd2a3a7, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732130806902 2024-11-20T19:26:51,370 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d8d5855c64049b791c8d7eda9cd17d3, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732130808091 2024-11-20T19:26:51,371 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ed5bc2944d64b4886b7a450b1db23e5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732130808091 2024-11-20T19:26:51,371 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2086d1545c09469fac7a22f097a2ea9a, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732130810255 2024-11-20T19:26:51,371 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting b035ecac4a214b189da2e314851a67a4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732130810255 2024-11-20T19:26:51,390 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d2917374eb6d0879b57e7f84a3c009e#B#compaction#348 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:51,390 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/e94a06641dbd4c21bfbe3e9f90954214 is 50, key is test_row_0/B:col10/1732130810255/Put/seqid=0 2024-11-20T19:26:51,398 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:51,409 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411207de9a0e4c8bf4cc081849743d8120865_2d2917374eb6d0879b57e7f84a3c009e store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:51,412 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411207de9a0e4c8bf4cc081849743d8120865_2d2917374eb6d0879b57e7f84a3c009e, store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:51,412 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207de9a0e4c8bf4cc081849743d8120865_2d2917374eb6d0879b57e7f84a3c009e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:51,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742239_1415 (size=12459) 2024-11-20T19:26:51,442 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/e94a06641dbd4c21bfbe3e9f90954214 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/e94a06641dbd4c21bfbe3e9f90954214 2024-11-20T19:26:51,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742240_1416 (size=4469) 2024-11-20T19:26:51,451 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d2917374eb6d0879b57e7f84a3c009e/B of 2d2917374eb6d0879b57e7f84a3c009e into e94a06641dbd4c21bfbe3e9f90954214(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:51,451 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:51,451 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., storeName=2d2917374eb6d0879b57e7f84a3c009e/B, priority=13, startTime=1732130811366; duration=0sec 2024-11-20T19:26:51,451 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:51,451 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d2917374eb6d0879b57e7f84a3c009e:B 2024-11-20T19:26:51,451 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:51,452 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:51,452 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:51,452 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 2d2917374eb6d0879b57e7f84a3c009e/C is initiating minor compaction (all files) 2024-11-20T19:26:51,452 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d2917374eb6d0879b57e7f84a3c009e/C in TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:51,452 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/40b37fae8b014c3e854fe69010067b8a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/92fe7c9da68044c4b974cb06f8fb3ab9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/5c6cb4832a34409498256571d50b040c] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp, totalSize=35.5 K 2024-11-20T19:26:51,452 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T19:26:51,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
2024-11-20T19:26:51,453 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing 2d2917374eb6d0879b57e7f84a3c009e 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T19:26:51,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=A 2024-11-20T19:26:51,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:51,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=B 2024-11-20T19:26:51,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:51,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=C 2024-11-20T19:26:51,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:51,454 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 40b37fae8b014c3e854fe69010067b8a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732130806902 2024-11-20T19:26:51,454 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 92fe7c9da68044c4b974cb06f8fb3ab9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732130808091 2024-11-20T19:26:51,454 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c6cb4832a34409498256571d50b040c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732130810255 2024-11-20T19:26:51,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120897b28dafc5f462db931c0c42031e2dd_2d2917374eb6d0879b57e7f84a3c009e is 50, key is test_row_0/A:col10/1732130810406/Put/seqid=0 2024-11-20T19:26:51,504 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d2917374eb6d0879b57e7f84a3c009e#C#compaction#351 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:51,505 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/5a0e671b0ad544fe9a701976e1243847 is 50, key is test_row_0/C:col10/1732130810255/Put/seqid=0 2024-11-20T19:26:51,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742241_1417 (size=12304) 2024-11-20T19:26:51,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:51,547 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120897b28dafc5f462db931c0c42031e2dd_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120897b28dafc5f462db931c0c42031e2dd_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:51,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/d65ccc9d27ca473282cf5e55154c7291, store: [table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:51,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/d65ccc9d27ca473282cf5e55154c7291 is 175, key is test_row_0/A:col10/1732130810406/Put/seqid=0 2024-11-20T19:26:51,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:51,550 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:51,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742242_1418 (size=12459) 2024-11-20T19:26:51,576 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130871565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:51,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130871571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:51,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742243_1419 (size=31105) 2024-11-20T19:26:51,582 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/d65ccc9d27ca473282cf5e55154c7291 2024-11-20T19:26:51,588 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130871573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:51,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130871575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:51,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130871576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:51,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/35a9c2faab8b40feb12c5e0dbae625e8 is 50, key is test_row_0/B:col10/1732130810406/Put/seqid=0 2024-11-20T19:26:51,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742244_1420 (size=12151) 2024-11-20T19:26:51,648 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/35a9c2faab8b40feb12c5e0dbae625e8 2024-11-20T19:26:51,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/688adba07a3d4e15b1e3623ef0d136b2 is 50, key is test_row_0/C:col10/1732130810406/Put/seqid=0 2024-11-20T19:26:51,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130871678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:51,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742245_1421 (size=12151) 2024-11-20T19:26:51,688 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/688adba07a3d4e15b1e3623ef0d136b2 2024-11-20T19:26:51,691 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130871683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:51,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130871690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:51,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130871690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:51,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130871690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:51,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/d65ccc9d27ca473282cf5e55154c7291 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/d65ccc9d27ca473282cf5e55154c7291 2024-11-20T19:26:51,702 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/d65ccc9d27ca473282cf5e55154c7291, entries=150, sequenceid=157, filesize=30.4 K 2024-11-20T19:26:51,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/35a9c2faab8b40feb12c5e0dbae625e8 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/35a9c2faab8b40feb12c5e0dbae625e8 2024-11-20T19:26:51,711 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/35a9c2faab8b40feb12c5e0dbae625e8, entries=150, sequenceid=157, filesize=11.9 K 2024-11-20T19:26:51,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/688adba07a3d4e15b1e3623ef0d136b2 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/688adba07a3d4e15b1e3623ef0d136b2 2024-11-20T19:26:51,716 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): 
Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/688adba07a3d4e15b1e3623ef0d136b2, entries=150, sequenceid=157, filesize=11.9 K 2024-11-20T19:26:51,717 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 2d2917374eb6d0879b57e7f84a3c009e in 264ms, sequenceid=157, compaction requested=false 2024-11-20T19:26:51,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:51,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:51,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-11-20T19:26:51,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-11-20T19:26:51,719 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-11-20T19:26:51,719 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0300 sec 2024-11-20T19:26:51,721 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 1.0360 sec 2024-11-20T19:26:51,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T19:26:51,793 INFO [Thread-1702 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-11-20T19:26:51,794 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:51,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-11-20T19:26:51,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T19:26:51,796 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:51,796 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:51,796 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized 
subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:51,851 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d2917374eb6d0879b57e7f84a3c009e#A#compaction#349 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:51,851 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/d62bd502ed0b4d74800dcc3f7d299117 is 175, key is test_row_0/A:col10/1732130810255/Put/seqid=0 2024-11-20T19:26:51,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742246_1422 (size=31413) 2024-11-20T19:26:51,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:51,896 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2d2917374eb6d0879b57e7f84a3c009e 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T19:26:51,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=A 2024-11-20T19:26:51,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:51,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=B 2024-11-20T19:26:51,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:51,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=C 2024-11-20T19:26:51,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:51,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T19:26:51,916 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cdc3059c45394e46bd2c0c7892a1e0d4_2d2917374eb6d0879b57e7f84a3c009e is 50, key is test_row_0/A:col10/1732130811894/Put/seqid=0 2024-11-20T19:26:51,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742247_1423 (size=14794) 2024-11-20T19:26:51,934 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:51,942 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cdc3059c45394e46bd2c0c7892a1e0d4_2d2917374eb6d0879b57e7f84a3c009e to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cdc3059c45394e46bd2c0c7892a1e0d4_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:51,943 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/7683cea3488e4338965d7b125f8187af, store: [table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:51,943 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/7683cea3488e4338965d7b125f8187af is 175, key is test_row_0/A:col10/1732130811894/Put/seqid=0 2024-11-20T19:26:51,947 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:51,948 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-20T19:26:51,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:51,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:51,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:51,948 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:51,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:51,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:51,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130871939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:51,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130871940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:51,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130871941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:51,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130871951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:51,964 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/5a0e671b0ad544fe9a701976e1243847 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/5a0e671b0ad544fe9a701976e1243847 2024-11-20T19:26:51,968 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d2917374eb6d0879b57e7f84a3c009e/C of 2d2917374eb6d0879b57e7f84a3c009e into 5a0e671b0ad544fe9a701976e1243847(size=12.2 K), total size for store is 24.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:51,968 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:51,968 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., storeName=2d2917374eb6d0879b57e7f84a3c009e/C, priority=13, startTime=1732130811368; duration=0sec 2024-11-20T19:26:51,969 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:51,969 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d2917374eb6d0879b57e7f84a3c009e:C 2024-11-20T19:26:51,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130871952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:51,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742248_1424 (size=39749) 2024-11-20T19:26:52,004 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=172, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/7683cea3488e4338965d7b125f8187af 2024-11-20T19:26:52,016 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/c90c5a27772a4b14b73b9b558d347792 is 50, key is test_row_0/B:col10/1732130811894/Put/seqid=0 2024-11-20T19:26:52,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742249_1425 (size=12151) 2024-11-20T19:26:52,058 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130872053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,058 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130872053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,059 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130872053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,067 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130872063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130872071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T19:26:52,100 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,100 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-20T19:26:52,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:52,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:52,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:52,101 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:52,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:52,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:52,253 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,253 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-20T19:26:52,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:52,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:52,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:52,253 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:52,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:52,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:52,265 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130872260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,265 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130872260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,265 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130872263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,276 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130872270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130872280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,298 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/d62bd502ed0b4d74800dcc3f7d299117 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/d62bd502ed0b4d74800dcc3f7d299117 2024-11-20T19:26:52,303 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d2917374eb6d0879b57e7f84a3c009e/A of 2d2917374eb6d0879b57e7f84a3c009e into d62bd502ed0b4d74800dcc3f7d299117(size=30.7 K), total size for store is 61.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
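Editor's note: the repeated `RegionTooBusyException: Over memstore limit=512.0 K` warnings above are the region server rejecting writes in `HRegion.checkResources` while the region's memstore sits above its blocking threshold and the flush (pid=109) is still in flight. The sketch below is not part of the test output; it is a minimal, hypothetical illustration of how a client writing to this table (families `A`/`B`/`C`, rows like `test_row_0`, qualifier `col10`) could back off and retry on that exception. The helper name `writeWithRetry` and the retry/backoff parameters are assumptions, not anything from the test.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryOnRegionTooBusy {

  // Hypothetical helper: retry a put a few times when the region reports
  // "Over memstore limit", sleeping between attempts so the in-flight flush
  // (like pid=109 in the log above) has a chance to drain the memstore.
  static void writeWithRetry(Table table, Put put, int maxAttempts, long backoffMs)
      throws IOException, InterruptedException {
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= maxAttempts) {
          throw e; // give up after the configured number of attempts
        }
        Thread.sleep(backoffMs * attempt); // simple linear backoff
      }
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // A multi-family put shaped like the test rows seen in the log
      // (row test_row_0, qualifier col10, families A/B/C).
      Put put = new Put(Bytes.toBytes("test_row_0"));
      for (String family : new String[] { "A", "B", "C" }) {
        put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      }
      writeWithRetry(table, put, 5, 200L);
    }
  }
}
```

In practice the stock HBase client usually retries this kind of server-side exception on its own; an explicit loop like the one above only matters when the caller has tightened or exhausted the client's own retry budget.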
2024-11-20T19:26:52,303 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:52,303 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., storeName=2d2917374eb6d0879b57e7f84a3c009e/A, priority=13, startTime=1732130811366; duration=0sec 2024-11-20T19:26:52,303 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:52,303 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d2917374eb6d0879b57e7f84a3c009e:A 2024-11-20T19:26:52,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T19:26:52,405 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,406 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-20T19:26:52,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:52,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:52,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:52,406 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:52,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:52,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:52,447 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/c90c5a27772a4b14b73b9b558d347792 2024-11-20T19:26:52,468 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/24cbbc025b3e456da054ce4085a1bab8 is 50, key is test_row_0/C:col10/1732130811894/Put/seqid=0 2024-11-20T19:26:52,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742250_1426 (size=12151) 2024-11-20T19:26:52,508 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/24cbbc025b3e456da054ce4085a1bab8 2024-11-20T19:26:52,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/7683cea3488e4338965d7b125f8187af as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/7683cea3488e4338965d7b125f8187af 2024-11-20T19:26:52,521 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/7683cea3488e4338965d7b125f8187af, entries=200, sequenceid=172, filesize=38.8 K 2024-11-20T19:26:52,522 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/c90c5a27772a4b14b73b9b558d347792 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/c90c5a27772a4b14b73b9b558d347792 2024-11-20T19:26:52,526 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/c90c5a27772a4b14b73b9b558d347792, entries=150, sequenceid=172, filesize=11.9 K 2024-11-20T19:26:52,528 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/24cbbc025b3e456da054ce4085a1bab8 as 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/24cbbc025b3e456da054ce4085a1bab8 2024-11-20T19:26:52,534 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/24cbbc025b3e456da054ce4085a1bab8, entries=150, sequenceid=172, filesize=11.9 K 2024-11-20T19:26:52,534 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 2d2917374eb6d0879b57e7f84a3c009e in 639ms, sequenceid=172, compaction requested=true 2024-11-20T19:26:52,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:52,535 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:52,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d2917374eb6d0879b57e7f84a3c009e:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:52,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:52,535 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:52,536 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:52,536 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 2d2917374eb6d0879b57e7f84a3c009e/B is initiating minor compaction (all files) 2024-11-20T19:26:52,536 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d2917374eb6d0879b57e7f84a3c009e/B in TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
2024-11-20T19:26:52,536 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/e94a06641dbd4c21bfbe3e9f90954214, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/35a9c2faab8b40feb12c5e0dbae625e8, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/c90c5a27772a4b14b73b9b558d347792] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp, totalSize=35.9 K 2024-11-20T19:26:52,537 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102267 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:52,537 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 2d2917374eb6d0879b57e7f84a3c009e/A is initiating minor compaction (all files) 2024-11-20T19:26:52,537 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d2917374eb6d0879b57e7f84a3c009e/A in TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:52,537 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/d62bd502ed0b4d74800dcc3f7d299117, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/d65ccc9d27ca473282cf5e55154c7291, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/7683cea3488e4338965d7b125f8187af] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp, totalSize=99.9 K 2024-11-20T19:26:52,537 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:52,537 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
files: [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/d62bd502ed0b4d74800dcc3f7d299117, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/d65ccc9d27ca473282cf5e55154c7291, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/7683cea3488e4338965d7b125f8187af] 2024-11-20T19:26:52,537 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting e94a06641dbd4c21bfbe3e9f90954214, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732130810255 2024-11-20T19:26:52,537 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 35a9c2faab8b40feb12c5e0dbae625e8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732130810406 2024-11-20T19:26:52,537 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting d62bd502ed0b4d74800dcc3f7d299117, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732130810255 2024-11-20T19:26:52,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d2917374eb6d0879b57e7f84a3c009e:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:52,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:52,538 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting c90c5a27772a4b14b73b9b558d347792, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732130811571 2024-11-20T19:26:52,538 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting d65ccc9d27ca473282cf5e55154c7291, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732130810406 2024-11-20T19:26:52,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d2917374eb6d0879b57e7f84a3c009e:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:52,539 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7683cea3488e4338965d7b125f8187af, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732130811571 2024-11-20T19:26:52,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:52,557 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,557 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-20T19:26:52,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): 
Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:52,558 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 2d2917374eb6d0879b57e7f84a3c009e 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T19:26:52,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=A 2024-11-20T19:26:52,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:52,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=B 2024-11-20T19:26:52,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:52,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=C 2024-11-20T19:26:52,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:52,561 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d2917374eb6d0879b57e7f84a3c009e#B#compaction#357 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:52,561 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/0b7bdef7d7c1473ea3e5aaa402f06943 is 50, key is test_row_0/B:col10/1732130811894/Put/seqid=0 2024-11-20T19:26:52,571 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:52,573 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
as already flushing 2024-11-20T19:26:52,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:52,582 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411209bfb65461f4640ffa0db3070e06c2e3a_2d2917374eb6d0879b57e7f84a3c009e store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:52,584 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411209bfb65461f4640ffa0db3070e06c2e3a_2d2917374eb6d0879b57e7f84a3c009e, store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:52,584 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209bfb65461f4640ffa0db3070e06c2e3a_2d2917374eb6d0879b57e7f84a3c009e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:52,602 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130872591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,605 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130872599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742251_1427 (size=12561) 2024-11-20T19:26:52,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200bbda53d21424d7f87cff33db9233e8e_2d2917374eb6d0879b57e7f84a3c009e is 50, key is test_row_0/A:col10/1732130811949/Put/seqid=0 2024-11-20T19:26:52,618 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130872600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,619 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130872601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,620 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130872603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,623 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/0b7bdef7d7c1473ea3e5aaa402f06943 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/0b7bdef7d7c1473ea3e5aaa402f06943 2024-11-20T19:26:52,629 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d2917374eb6d0879b57e7f84a3c009e/B of 2d2917374eb6d0879b57e7f84a3c009e into 0b7bdef7d7c1473ea3e5aaa402f06943(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
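Editor's note: the `Over memstore limit=512.0 K` figure in these warnings is the region's write-blocking threshold, which HBase derives from the configured memstore flush size and the block multiplier; the unusually small value suggests the test tuned the flush size far below the production default. The snippet below is an assumption-laden sketch (not taken from the test's actual configuration) showing one combination of the real properties `hbase.hregion.memstore.flush.size` and `hbase.hregion.memstore.block.multiplier` that would produce a 512 K limit.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Hypothetical test-style tuning: flush each region's memstore at 128 KB...
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // ...and block new writes once the memstore reaches 4x the flush size.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);

    // 128 KB * 4 = 512 KB, matching the "Over memstore limit=512.0 K" warnings.
    System.out.println("Blocking limit = " + (flushSize * multiplier / 1024) + " K");
  }
}
```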
2024-11-20T19:26:52,629 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:52,630 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., storeName=2d2917374eb6d0879b57e7f84a3c009e/B, priority=13, startTime=1732130812535; duration=0sec 2024-11-20T19:26:52,630 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:52,630 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d2917374eb6d0879b57e7f84a3c009e:B 2024-11-20T19:26:52,630 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:52,631 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:52,631 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 2d2917374eb6d0879b57e7f84a3c009e/C is initiating minor compaction (all files) 2024-11-20T19:26:52,631 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d2917374eb6d0879b57e7f84a3c009e/C in TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:52,631 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/5a0e671b0ad544fe9a701976e1243847, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/688adba07a3d4e15b1e3623ef0d136b2, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/24cbbc025b3e456da054ce4085a1bab8] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp, totalSize=35.9 K 2024-11-20T19:26:52,631 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a0e671b0ad544fe9a701976e1243847, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732130810255 2024-11-20T19:26:52,631 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 688adba07a3d4e15b1e3623ef0d136b2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732130810406 2024-11-20T19:26:52,632 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 24cbbc025b3e456da054ce4085a1bab8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732130811571 2024-11-20T19:26:52,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 
is added to blk_1073742252_1428 (size=4469) 2024-11-20T19:26:52,660 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d2917374eb6d0879b57e7f84a3c009e#A#compaction#358 average throughput is 0.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:52,660 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/55b905288c4042d7b8cd3165516145c7 is 175, key is test_row_0/A:col10/1732130811894/Put/seqid=0 2024-11-20T19:26:52,676 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d2917374eb6d0879b57e7f84a3c009e#C#compaction#360 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:52,677 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/7a66bd9ce4ea49efb1760d135a61e5bb is 50, key is test_row_0/C:col10/1732130811894/Put/seqid=0 2024-11-20T19:26:52,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742253_1429 (size=12304) 2024-11-20T19:26:52,713 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130872705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130872707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130872721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130872722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130872722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742254_1430 (size=31515) 2024-11-20T19:26:52,736 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/55b905288c4042d7b8cd3165516145c7 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/55b905288c4042d7b8cd3165516145c7 2024-11-20T19:26:52,743 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d2917374eb6d0879b57e7f84a3c009e/A of 2d2917374eb6d0879b57e7f84a3c009e into 55b905288c4042d7b8cd3165516145c7(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:52,743 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:52,743 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., storeName=2d2917374eb6d0879b57e7f84a3c009e/A, priority=13, startTime=1732130812535; duration=0sec 2024-11-20T19:26:52,744 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:52,744 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d2917374eb6d0879b57e7f84a3c009e:A 2024-11-20T19:26:52,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742255_1431 (size=12561) 2024-11-20T19:26:52,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T19:26:52,917 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130872915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130872917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,938 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130872929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,939 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130872930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:52,939 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130872930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:53,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:53,098 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200bbda53d21424d7f87cff33db9233e8e_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200bbda53d21424d7f87cff33db9233e8e_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:53,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/364e12e4e0b8487895570cd672af15d7, store: [table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:53,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/364e12e4e0b8487895570cd672af15d7 is 175, key is test_row_0/A:col10/1732130811949/Put/seqid=0 2024-11-20T19:26:53,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742256_1432 (size=31105) 2024-11-20T19:26:53,171 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/7a66bd9ce4ea49efb1760d135a61e5bb as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/7a66bd9ce4ea49efb1760d135a61e5bb 2024-11-20T19:26:53,177 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d2917374eb6d0879b57e7f84a3c009e/C of 2d2917374eb6d0879b57e7f84a3c009e into 7a66bd9ce4ea49efb1760d135a61e5bb(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:53,178 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:53,178 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., storeName=2d2917374eb6d0879b57e7f84a3c009e/C, priority=13, startTime=1732130812538; duration=0sec 2024-11-20T19:26:53,178 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:53,178 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d2917374eb6d0879b57e7f84a3c009e:C 2024-11-20T19:26:53,225 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130873222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:53,233 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130873226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:53,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130873243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:53,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130873243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:53,249 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130873243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:53,533 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=195, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/364e12e4e0b8487895570cd672af15d7 2024-11-20T19:26:53,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/90e38d990afb4077b6d75a2e17be0a53 is 50, key is test_row_0/B:col10/1732130811949/Put/seqid=0 2024-11-20T19:26:53,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742257_1433 (size=12151) 2024-11-20T19:26:53,585 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/90e38d990afb4077b6d75a2e17be0a53 2024-11-20T19:26:53,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/625208d64160451396281d5ba19f0d18 is 50, key is 
test_row_0/C:col10/1732130811949/Put/seqid=0 2024-11-20T19:26:53,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742258_1434 (size=12151) 2024-11-20T19:26:53,650 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/625208d64160451396281d5ba19f0d18 2024-11-20T19:26:53,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/364e12e4e0b8487895570cd672af15d7 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/364e12e4e0b8487895570cd672af15d7 2024-11-20T19:26:53,668 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/364e12e4e0b8487895570cd672af15d7, entries=150, sequenceid=195, filesize=30.4 K 2024-11-20T19:26:53,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/90e38d990afb4077b6d75a2e17be0a53 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/90e38d990afb4077b6d75a2e17be0a53 2024-11-20T19:26:53,673 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/90e38d990afb4077b6d75a2e17be0a53, entries=150, sequenceid=195, filesize=11.9 K 2024-11-20T19:26:53,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/625208d64160451396281d5ba19f0d18 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/625208d64160451396281d5ba19f0d18 2024-11-20T19:26:53,689 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/625208d64160451396281d5ba19f0d18, entries=150, sequenceid=195, filesize=11.9 K 2024-11-20T19:26:53,690 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 
{event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 2d2917374eb6d0879b57e7f84a3c009e in 1131ms, sequenceid=195, compaction requested=false 2024-11-20T19:26:53,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:53,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:53,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-11-20T19:26:53,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-11-20T19:26:53,692 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-11-20T19:26:53,692 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8950 sec 2024-11-20T19:26:53,693 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 1.8980 sec 2024-11-20T19:26:53,733 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2d2917374eb6d0879b57e7f84a3c009e 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T19:26:53,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=A 2024-11-20T19:26:53,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:53,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=B 2024-11-20T19:26:53,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:53,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=C 2024-11-20T19:26:53,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:53,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:53,753 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a666e1372fbc4631b1bd512a921d762b_2d2917374eb6d0879b57e7f84a3c009e is 50, key is test_row_0/A:col10/1732130813731/Put/seqid=0 2024-11-20T19:26:53,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742259_1435 (size=14794) 2024-11-20T19:26:53,777 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:53,782 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a666e1372fbc4631b1bd512a921d762b_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a666e1372fbc4631b1bd512a921d762b_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:53,784 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/c668c02b46e04fbeb669a4303c438584, store: [table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:53,785 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/c668c02b46e04fbeb669a4303c438584 is 175, key is test_row_0/A:col10/1732130813731/Put/seqid=0 2024-11-20T19:26:53,786 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130873772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:53,788 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130873774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:53,788 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130873774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:53,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130873775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:53,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130873775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:53,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742260_1436 (size=39749) 2024-11-20T19:26:53,806 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=214, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/c668c02b46e04fbeb669a4303c438584 2024-11-20T19:26:53,816 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/dc5836da34554fb6a110d04600b4b142 is 50, key is test_row_0/B:col10/1732130813731/Put/seqid=0 2024-11-20T19:26:53,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742261_1437 (size=12151) 2024-11-20T19:26:53,851 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/dc5836da34554fb6a110d04600b4b142 2024-11-20T19:26:53,868 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/fc6d37936bc24f098e0f5a34cee5b7ab is 50, key is test_row_0/C:col10/1732130813731/Put/seqid=0 2024-11-20T19:26:53,894 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130873888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:53,894 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130873889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:53,895 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130873890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:53,895 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130873890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:53,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T19:26:53,901 INFO [Thread-1702 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-11-20T19:26:53,901 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130873891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:53,902 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:53,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-11-20T19:26:53,904 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:53,904 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:53,904 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:53,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T19:26:53,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742262_1438 (size=12151) 2024-11-20T19:26:53,918 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/fc6d37936bc24f098e0f5a34cee5b7ab 2024-11-20T19:26:53,929 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/c668c02b46e04fbeb669a4303c438584 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/c668c02b46e04fbeb669a4303c438584 2024-11-20T19:26:53,933 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/c668c02b46e04fbeb669a4303c438584, entries=200, sequenceid=214, filesize=38.8 K 2024-11-20T19:26:53,934 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/dc5836da34554fb6a110d04600b4b142 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/dc5836da34554fb6a110d04600b4b142 2024-11-20T19:26:53,939 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/dc5836da34554fb6a110d04600b4b142, entries=150, sequenceid=214, filesize=11.9 K 2024-11-20T19:26:53,941 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/fc6d37936bc24f098e0f5a34cee5b7ab as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/fc6d37936bc24f098e0f5a34cee5b7ab 2024-11-20T19:26:53,945 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/fc6d37936bc24f098e0f5a34cee5b7ab, entries=150, sequenceid=214, filesize=11.9 K 2024-11-20T19:26:53,946 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 2d2917374eb6d0879b57e7f84a3c009e in 213ms, sequenceid=214, compaction requested=true 2024-11-20T19:26:53,946 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:53,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d2917374eb6d0879b57e7f84a3c009e:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:53,946 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:53,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:53,946 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:53,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d2917374eb6d0879b57e7f84a3c009e:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:53,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:53,947 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 2d2917374eb6d0879b57e7f84a3c009e:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:53,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:53,947 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102369 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:53,947 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:53,947 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 2d2917374eb6d0879b57e7f84a3c009e/A is initiating minor compaction (all files) 2024-11-20T19:26:53,947 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 2d2917374eb6d0879b57e7f84a3c009e/B is initiating minor compaction (all files) 2024-11-20T19:26:53,947 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d2917374eb6d0879b57e7f84a3c009e/B in TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:53,947 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d2917374eb6d0879b57e7f84a3c009e/A in TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:53,948 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/0b7bdef7d7c1473ea3e5aaa402f06943, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/90e38d990afb4077b6d75a2e17be0a53, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/dc5836da34554fb6a110d04600b4b142] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp, totalSize=36.0 K 2024-11-20T19:26:53,948 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/55b905288c4042d7b8cd3165516145c7, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/364e12e4e0b8487895570cd672af15d7, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/c668c02b46e04fbeb669a4303c438584] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp, totalSize=100.0 K 2024-11-20T19:26:53,948 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] 
mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:53,948 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. files: [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/55b905288c4042d7b8cd3165516145c7, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/364e12e4e0b8487895570cd672af15d7, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/c668c02b46e04fbeb669a4303c438584] 2024-11-20T19:26:53,948 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b7bdef7d7c1473ea3e5aaa402f06943, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732130811571 2024-11-20T19:26:53,948 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 55b905288c4042d7b8cd3165516145c7, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732130811571 2024-11-20T19:26:53,948 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 90e38d990afb4077b6d75a2e17be0a53, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732130811940 2024-11-20T19:26:53,948 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 364e12e4e0b8487895570cd672af15d7, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732130811940 2024-11-20T19:26:53,948 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting dc5836da34554fb6a110d04600b4b142, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732130812598 2024-11-20T19:26:53,949 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting c668c02b46e04fbeb669a4303c438584, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732130812598 2024-11-20T19:26:53,963 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d2917374eb6d0879b57e7f84a3c009e#B#compaction#366 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:53,964 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/835fb0f64c1d454292cc4bf15d9892d8 is 50, key is test_row_0/B:col10/1732130813731/Put/seqid=0 2024-11-20T19:26:53,979 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:53,988 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120759239f70e5c4f0cb42156c33b8cc0dc_2d2917374eb6d0879b57e7f84a3c009e store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:53,990 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120759239f70e5c4f0cb42156c33b8cc0dc_2d2917374eb6d0879b57e7f84a3c009e, store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:53,990 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120759239f70e5c4f0cb42156c33b8cc0dc_2d2917374eb6d0879b57e7f84a3c009e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:54,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T19:26:54,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742263_1439 (size=12663) 2024-11-20T19:26:54,043 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/835fb0f64c1d454292cc4bf15d9892d8 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/835fb0f64c1d454292cc4bf15d9892d8 2024-11-20T19:26:54,050 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d2917374eb6d0879b57e7f84a3c009e/B of 2d2917374eb6d0879b57e7f84a3c009e into 835fb0f64c1d454292cc4bf15d9892d8(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
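[Editor's note, not part of the captured log] The repeated RegionTooBusyException entries above are the region server rejecting Mutate calls because the region's memstore has grown past its blocking limit (512.0 K here) while flushes and compactions catch up. As a minimal, hypothetical client-side sketch only (none of this code is taken from TestAcidGuarantees), the snippet below shows how a writer could widen the HBase client's built-in retry window so these transient rejections are absorbed instead of surfacing to the caller. The table name and cell coordinates are copied from the log; the retry count and pause values are arbitrary assumptions.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriteSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed values: widen the retry window so short memstore-pressure spikes are ridden out.
            conf.setInt("hbase.client.retries.number", 20);
            conf.setLong("hbase.client.pause", 100L); // base backoff between retries, in milliseconds
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                // RegionTooBusyException is treated as retriable by the client, so this call is
                // retried internally with backoff before an IOException is thrown to the caller.
                table.put(put);
            } catch (IOException e) {
                // Retries exhausted: the region stayed over its memstore limit for the whole window.
                e.printStackTrace();
            }
        }
    }

A longer retry window only hides the symptom; the log shows the real relief arriving when the pending flush and the A/B/C store compactions complete and memstore pressure drops.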
2024-11-20T19:26:54,050 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:54,050 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., storeName=2d2917374eb6d0879b57e7f84a3c009e/B, priority=13, startTime=1732130813946; duration=0sec 2024-11-20T19:26:54,050 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:54,050 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d2917374eb6d0879b57e7f84a3c009e:B 2024-11-20T19:26:54,050 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:54,054 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:54,054 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 2d2917374eb6d0879b57e7f84a3c009e/C is initiating minor compaction (all files) 2024-11-20T19:26:54,054 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d2917374eb6d0879b57e7f84a3c009e/C in TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:54,054 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/7a66bd9ce4ea49efb1760d135a61e5bb, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/625208d64160451396281d5ba19f0d18, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/fc6d37936bc24f098e0f5a34cee5b7ab] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp, totalSize=36.0 K 2024-11-20T19:26:54,055 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 7a66bd9ce4ea49efb1760d135a61e5bb, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732130811571 2024-11-20T19:26:54,055 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 625208d64160451396281d5ba19f0d18, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732130811940 2024-11-20T19:26:54,056 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:54,056 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting fc6d37936bc24f098e0f5a34cee5b7ab, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, 
earliestPutTs=1732130812598 2024-11-20T19:26:54,057 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T19:26:54,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:54,057 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing 2d2917374eb6d0879b57e7f84a3c009e 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T19:26:54,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=A 2024-11-20T19:26:54,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:54,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=B 2024-11-20T19:26:54,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:54,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=C 2024-11-20T19:26:54,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:54,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742264_1440 (size=4469) 2024-11-20T19:26:54,060 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d2917374eb6d0879b57e7f84a3c009e#A#compaction#367 average throughput is 0.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:54,061 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/98ac278051494bd7be91d43d7cd3aadf is 175, key is test_row_0/A:col10/1732130813731/Put/seqid=0 2024-11-20T19:26:54,068 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d2917374eb6d0879b57e7f84a3c009e#C#compaction#368 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:54,069 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/06091869e2264f8caf9300b4289eab3c is 50, key is test_row_0/C:col10/1732130813731/Put/seqid=0 2024-11-20T19:26:54,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112095fb826c47374ef6a70777641e867e51_2d2917374eb6d0879b57e7f84a3c009e is 50, key is test_row_0/A:col10/1732130813770/Put/seqid=0 2024-11-20T19:26:54,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742265_1441 (size=31617) 2024-11-20T19:26:54,104 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:54,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:54,107 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/98ac278051494bd7be91d43d7cd3aadf as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/98ac278051494bd7be91d43d7cd3aadf 2024-11-20T19:26:54,112 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d2917374eb6d0879b57e7f84a3c009e/A of 2d2917374eb6d0879b57e7f84a3c009e into 98ac278051494bd7be91d43d7cd3aadf(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:54,112 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:54,112 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., storeName=2d2917374eb6d0879b57e7f84a3c009e/A, priority=13, startTime=1732130813946; duration=0sec 2024-11-20T19:26:54,112 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:54,112 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d2917374eb6d0879b57e7f84a3c009e:A 2024-11-20T19:26:54,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130874124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:54,135 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130874127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:54,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742266_1442 (size=12663) 2024-11-20T19:26:54,136 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130874127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:54,137 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130874128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:54,137 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130874133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:54,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742267_1443 (size=12304) 2024-11-20T19:26:54,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:54,165 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112095fb826c47374ef6a70777641e867e51_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112095fb826c47374ef6a70777641e867e51_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:54,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/1f6fd94d0cf3497992e53d39fec642e9, store: [table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:54,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/1f6fd94d0cf3497992e53d39fec642e9 is 175, key is test_row_0/A:col10/1732130813770/Put/seqid=0 2024-11-20T19:26:54,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T19:26:54,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742268_1444 (size=31105) 2024-11-20T19:26:54,215 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=236, memsize=40.3 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/1f6fd94d0cf3497992e53d39fec642e9 2024-11-20T19:26:54,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/f111ce7f72f44ed3887ec120cdbc5e89 is 50, key is test_row_0/B:col10/1732130813770/Put/seqid=0 2024-11-20T19:26:54,242 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130874237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:54,242 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130874237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:54,242 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130874237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:54,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130874238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:54,249 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130874241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:54,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742269_1445 (size=12151) 2024-11-20T19:26:54,448 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130874444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:54,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130874444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:54,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130874444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:54,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130874444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:54,457 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130874450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:54,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T19:26:54,541 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/06091869e2264f8caf9300b4289eab3c as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/06091869e2264f8caf9300b4289eab3c 2024-11-20T19:26:54,546 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d2917374eb6d0879b57e7f84a3c009e/C of 2d2917374eb6d0879b57e7f84a3c009e into 06091869e2264f8caf9300b4289eab3c(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:54,546 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:54,546 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., storeName=2d2917374eb6d0879b57e7f84a3c009e/C, priority=13, startTime=1732130813947; duration=0sec 2024-11-20T19:26:54,546 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:54,546 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d2917374eb6d0879b57e7f84a3c009e:C 2024-11-20T19:26:54,692 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/f111ce7f72f44ed3887ec120cdbc5e89 2024-11-20T19:26:54,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/a8d6fa740fc34af6baa171c1ea0eae6e is 50, key is test_row_0/C:col10/1732130813770/Put/seqid=0 2024-11-20T19:26:54,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742270_1446 (size=12151) 2024-11-20T19:26:54,725 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/a8d6fa740fc34af6baa171c1ea0eae6e 2024-11-20T19:26:54,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/1f6fd94d0cf3497992e53d39fec642e9 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/1f6fd94d0cf3497992e53d39fec642e9 2024-11-20T19:26:54,744 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/1f6fd94d0cf3497992e53d39fec642e9, entries=150, sequenceid=236, filesize=30.4 K 2024-11-20T19:26:54,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/f111ce7f72f44ed3887ec120cdbc5e89 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/f111ce7f72f44ed3887ec120cdbc5e89 2024-11-20T19:26:54,751 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130874751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:54,752 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/f111ce7f72f44ed3887ec120cdbc5e89, entries=150, sequenceid=236, filesize=11.9 K 2024-11-20T19:26:54,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/a8d6fa740fc34af6baa171c1ea0eae6e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/a8d6fa740fc34af6baa171c1ea0eae6e 2024-11-20T19:26:54,756 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/a8d6fa740fc34af6baa171c1ea0eae6e, entries=150, sequenceid=236, filesize=11.9 K 2024-11-20T19:26:54,757 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 2d2917374eb6d0879b57e7f84a3c009e in 700ms, sequenceid=236, compaction requested=false 2024-11-20T19:26:54,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:54,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:54,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-11-20T19:26:54,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-11-20T19:26:54,759 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-11-20T19:26:54,759 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 854 msec 2024-11-20T19:26:54,761 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 857 msec 2024-11-20T19:26:54,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:54,762 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2d2917374eb6d0879b57e7f84a3c009e 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T19:26:54,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=A 2024-11-20T19:26:54,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:54,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=B 2024-11-20T19:26:54,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:54,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=C 2024-11-20T19:26:54,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:54,777 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205607fd21bcb146b4a2f3ccea944f8bbc_2d2917374eb6d0879b57e7f84a3c009e is 50, key is test_row_0/A:col10/1732130814762/Put/seqid=0 
2024-11-20T19:26:54,808 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,808 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130874799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:54,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130874800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:54,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130874808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:54,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130874808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:54,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742271_1447 (size=17284) 2024-11-20T19:26:54,821 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:54,829 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205607fd21bcb146b4a2f3ccea944f8bbc_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205607fd21bcb146b4a2f3ccea944f8bbc_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:54,831 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/d327447c569f4c5b9759320928307669, store: [table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:54,832 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/d327447c569f4c5b9759320928307669 is 175, key is test_row_0/A:col10/1732130814762/Put/seqid=0 2024-11-20T19:26:54,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 
is added to blk_1073742272_1448 (size=48389) 2024-11-20T19:26:54,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130874909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:54,912 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130874910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:54,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130874918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:54,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130874918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:55,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T19:26:55,008 INFO [Thread-1702 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-11-20T19:26:55,009 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:55,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-11-20T19:26:55,010 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:55,011 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:55,011 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:55,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T19:26:55,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T19:26:55,116 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130875113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:55,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130875114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:55,129 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130875122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:55,130 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130875124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:55,162 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:55,163 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T19:26:55,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:55,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:55,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:55,163 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:55,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:55,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:55,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130875264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:55,268 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=255, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/d327447c569f4c5b9759320928307669 2024-11-20T19:26:55,292 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/a0fa089f3701488cbc13e4cd966bb159 is 50, key is test_row_0/B:col10/1732130814762/Put/seqid=0 2024-11-20T19:26:55,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T19:26:55,315 DEBUG [RSProcedureDispatcher-pool-2 {}] 
master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:55,317 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T19:26:55,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:55,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:55,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:55,317 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:55,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:55,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:55,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742273_1449 (size=12151) 2024-11-20T19:26:55,323 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/a0fa089f3701488cbc13e4cd966bb159 2024-11-20T19:26:55,333 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/51db0dc87b7d424585578bd7134320eb is 50, key is test_row_0/C:col10/1732130814762/Put/seqid=0 2024-11-20T19:26:55,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742274_1450 (size=12151) 2024-11-20T19:26:55,368 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/51db0dc87b7d424585578bd7134320eb 2024-11-20T19:26:55,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/d327447c569f4c5b9759320928307669 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/d327447c569f4c5b9759320928307669 2024-11-20T19:26:55,378 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/d327447c569f4c5b9759320928307669, entries=250, sequenceid=255, filesize=47.3 K 2024-11-20T19:26:55,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/a0fa089f3701488cbc13e4cd966bb159 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/a0fa089f3701488cbc13e4cd966bb159 2024-11-20T19:26:55,384 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/a0fa089f3701488cbc13e4cd966bb159, entries=150, sequenceid=255, filesize=11.9 K 2024-11-20T19:26:55,384 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/51db0dc87b7d424585578bd7134320eb as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/51db0dc87b7d424585578bd7134320eb 2024-11-20T19:26:55,390 INFO [MemStoreFlusher.0 
{}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/51db0dc87b7d424585578bd7134320eb, entries=150, sequenceid=255, filesize=11.9 K 2024-11-20T19:26:55,391 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 2d2917374eb6d0879b57e7f84a3c009e in 629ms, sequenceid=255, compaction requested=true 2024-11-20T19:26:55,391 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:55,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d2917374eb6d0879b57e7f84a3c009e:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:55,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:55,391 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:55,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d2917374eb6d0879b57e7f84a3c009e:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:55,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:55,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d2917374eb6d0879b57e7f84a3c009e:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:55,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T19:26:55,392 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111111 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:55,392 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 2d2917374eb6d0879b57e7f84a3c009e/A is initiating minor compaction (all files) 2024-11-20T19:26:55,392 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d2917374eb6d0879b57e7f84a3c009e/A in TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
2024-11-20T19:26:55,392 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/98ac278051494bd7be91d43d7cd3aadf, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/1f6fd94d0cf3497992e53d39fec642e9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/d327447c569f4c5b9759320928307669] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp, totalSize=108.5 K 2024-11-20T19:26:55,392 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:55,392 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:55,392 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. files: [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/98ac278051494bd7be91d43d7cd3aadf, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/1f6fd94d0cf3497992e53d39fec642e9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/d327447c569f4c5b9759320928307669] 2024-11-20T19:26:55,393 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 98ac278051494bd7be91d43d7cd3aadf, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732130812598 2024-11-20T19:26:55,393 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f6fd94d0cf3497992e53d39fec642e9, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732130813770 2024-11-20T19:26:55,393 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:55,393 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 2d2917374eb6d0879b57e7f84a3c009e/B is initiating minor compaction (all files) 2024-11-20T19:26:55,393 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d2917374eb6d0879b57e7f84a3c009e/B in TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
2024-11-20T19:26:55,393 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/835fb0f64c1d454292cc4bf15d9892d8, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/f111ce7f72f44ed3887ec120cdbc5e89, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/a0fa089f3701488cbc13e4cd966bb159] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp, totalSize=36.1 K 2024-11-20T19:26:55,393 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting d327447c569f4c5b9759320928307669, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732130814126 2024-11-20T19:26:55,393 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 835fb0f64c1d454292cc4bf15d9892d8, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732130812598 2024-11-20T19:26:55,394 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting f111ce7f72f44ed3887ec120cdbc5e89, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732130813770 2024-11-20T19:26:55,395 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting a0fa089f3701488cbc13e4cd966bb159, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732130814127 2024-11-20T19:26:55,411 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:55,414 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d2917374eb6d0879b57e7f84a3c009e#B#compaction#376 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:55,414 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/02b768ae0d104159b2fe779933659ebd is 50, key is test_row_0/B:col10/1732130814762/Put/seqid=0 2024-11-20T19:26:55,427 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2d2917374eb6d0879b57e7f84a3c009e 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T19:26:55,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=A 2024-11-20T19:26:55,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:55,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=B 2024-11-20T19:26:55,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:55,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=C 2024-11-20T19:26:55,427 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120ea10d9dacea44e4db78e5d28c0acedcc_2d2917374eb6d0879b57e7f84a3c009e store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:55,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:55,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:55,429 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120ea10d9dacea44e4db78e5d28c0acedcc_2d2917374eb6d0879b57e7f84a3c009e, store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:55,429 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ea10d9dacea44e4db78e5d28c0acedcc_2d2917374eb6d0879b57e7f84a3c009e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:55,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742275_1451 (size=12765) 2024-11-20T19:26:55,469 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/02b768ae0d104159b2fe779933659ebd as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/02b768ae0d104159b2fe779933659ebd 2024-11-20T19:26:55,470 DEBUG [RSProcedureDispatcher-pool-0 {}] 
master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:55,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T19:26:55,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:55,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:55,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:55,471 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:55,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:55,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:55,475 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d2917374eb6d0879b57e7f84a3c009e/B of 2d2917374eb6d0879b57e7f84a3c009e into 02b768ae0d104159b2fe779933659ebd(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:55,475 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:55,475 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., storeName=2d2917374eb6d0879b57e7f84a3c009e/B, priority=13, startTime=1732130815391; duration=0sec 2024-11-20T19:26:55,475 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:55,475 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d2917374eb6d0879b57e7f84a3c009e:B 2024-11-20T19:26:55,475 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:55,476 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:55,476 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 2d2917374eb6d0879b57e7f84a3c009e/C is initiating minor compaction (all files) 2024-11-20T19:26:55,476 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d2917374eb6d0879b57e7f84a3c009e/C in TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:55,476 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/06091869e2264f8caf9300b4289eab3c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/a8d6fa740fc34af6baa171c1ea0eae6e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/51db0dc87b7d424585578bd7134320eb] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp, totalSize=36.1 K 2024-11-20T19:26:55,476 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 06091869e2264f8caf9300b4289eab3c, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732130812598 2024-11-20T19:26:55,477 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting a8d6fa740fc34af6baa171c1ea0eae6e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732130813770 2024-11-20T19:26:55,477 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 51db0dc87b7d424585578bd7134320eb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732130814127 2024-11-20T19:26:55,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130875470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:55,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130875472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:55,489 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130875477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:55,490 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b82db3eebf1b46e881646e2b3bda5b21_2d2917374eb6d0879b57e7f84a3c009e is 50, key is test_row_0/A:col10/1732130814799/Put/seqid=0 2024-11-20T19:26:55,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130875477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:55,515 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d2917374eb6d0879b57e7f84a3c009e#C#compaction#378 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:55,516 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/8644af4eea1c4361ad4d116dabdb28fb is 50, key is test_row_0/C:col10/1732130814762/Put/seqid=0 2024-11-20T19:26:55,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742276_1452 (size=4469) 2024-11-20T19:26:55,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742277_1453 (size=14994) 2024-11-20T19:26:55,572 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:55,575 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b82db3eebf1b46e881646e2b3bda5b21_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b82db3eebf1b46e881646e2b3bda5b21_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:55,576 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/bcee91c59e2c494fb3e76b4e20f870e9, store: [table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:55,577 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/bcee91c59e2c494fb3e76b4e20f870e9 is 175, key is test_row_0/A:col10/1732130814799/Put/seqid=0 2024-11-20T19:26:55,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742278_1454 (size=12765) 2024-11-20T19:26:55,591 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130875589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:55,595 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/8644af4eea1c4361ad4d116dabdb28fb as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/8644af4eea1c4361ad4d116dabdb28fb 2024-11-20T19:26:55,599 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d2917374eb6d0879b57e7f84a3c009e/C of 2d2917374eb6d0879b57e7f84a3c009e into 8644af4eea1c4361ad4d116dabdb28fb(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:55,599 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:55,599 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., storeName=2d2917374eb6d0879b57e7f84a3c009e/C, priority=13, startTime=1732130815391; duration=0sec 2024-11-20T19:26:55,599 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:55,599 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d2917374eb6d0879b57e7f84a3c009e:C 2024-11-20T19:26:55,600 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130875590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:55,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130875590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:55,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130875591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:55,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742279_1455 (size=39949) 2024-11-20T19:26:55,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T19:26:55,623 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:55,624 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T19:26:55,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:55,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:55,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:55,624 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:55,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:55,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:55,776 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:55,776 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T19:26:55,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:55,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:55,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:55,777 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:55,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:55,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:55,796 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130875793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:55,805 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130875801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:55,807 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130875807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:55,807 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130875807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:55,924 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d2917374eb6d0879b57e7f84a3c009e#A#compaction#375 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:55,925 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/5d2f618355f44a0687a3db7d9b59a88a is 175, key is test_row_0/A:col10/1732130814762/Put/seqid=0 2024-11-20T19:26:55,929 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:55,929 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T19:26:55,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742280_1456 (size=31719) 2024-11-20T19:26:55,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:55,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:55,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:55,930 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:55,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:55,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:55,938 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/5d2f618355f44a0687a3db7d9b59a88a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/5d2f618355f44a0687a3db7d9b59a88a 2024-11-20T19:26:55,943 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d2917374eb6d0879b57e7f84a3c009e/A of 2d2917374eb6d0879b57e7f84a3c009e into 5d2f618355f44a0687a3db7d9b59a88a(size=31.0 K), total size for store is 31.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:55,943 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:55,943 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., storeName=2d2917374eb6d0879b57e7f84a3c009e/A, priority=13, startTime=1732130815391; duration=0sec 2024-11-20T19:26:55,943 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:55,943 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d2917374eb6d0879b57e7f84a3c009e:A 2024-11-20T19:26:56,010 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=275, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/bcee91c59e2c494fb3e76b4e20f870e9 2024-11-20T19:26:56,020 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/c350cade0ba743a89e5a0f0da32d683b is 50, key is test_row_0/B:col10/1732130814799/Put/seqid=0 2024-11-20T19:26:56,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742281_1457 (size=12301) 2024-11-20T19:26:56,082 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:56,083 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T19:26:56,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:56,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
as already flushing 2024-11-20T19:26:56,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:56,083 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:56,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:56,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:56,105 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130876100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:56,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130876108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:56,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130876109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:56,115 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130876109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:56,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T19:26:56,235 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:56,236 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T19:26:56,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:56,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:56,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:56,236 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:56,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:56,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:56,277 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130876272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:56,389 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:56,390 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T19:26:56,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:56,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:56,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
2024-11-20T19:26:56,390 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:56,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:56,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:56,459 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/c350cade0ba743a89e5a0f0da32d683b 2024-11-20T19:26:56,465 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/9869671c4f534945811c5b1db4afc28c is 50, key is test_row_0/C:col10/1732130814799/Put/seqid=0 2024-11-20T19:26:56,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742282_1458 (size=12301) 2024-11-20T19:26:56,504 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/9869671c4f534945811c5b1db4afc28c 2024-11-20T19:26:56,508 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/bcee91c59e2c494fb3e76b4e20f870e9 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/bcee91c59e2c494fb3e76b4e20f870e9 2024-11-20T19:26:56,524 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/bcee91c59e2c494fb3e76b4e20f870e9, entries=200, sequenceid=275, filesize=39.0 K 2024-11-20T19:26:56,526 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/c350cade0ba743a89e5a0f0da32d683b as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/c350cade0ba743a89e5a0f0da32d683b 2024-11-20T19:26:56,533 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/c350cade0ba743a89e5a0f0da32d683b, entries=150, sequenceid=275, filesize=12.0 K 2024-11-20T19:26:56,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/9869671c4f534945811c5b1db4afc28c as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/9869671c4f534945811c5b1db4afc28c 2024-11-20T19:26:56,543 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:56,543 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T19:26:56,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:56,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:56,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:56,543 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:56,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:56,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:56,549 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/9869671c4f534945811c5b1db4afc28c, entries=150, sequenceid=275, filesize=12.0 K 2024-11-20T19:26:56,550 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 2d2917374eb6d0879b57e7f84a3c009e in 1123ms, sequenceid=275, compaction requested=false 2024-11-20T19:26:56,550 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:56,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:56,611 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2d2917374eb6d0879b57e7f84a3c009e 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-20T19:26:56,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=A 2024-11-20T19:26:56,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:56,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=B 2024-11-20T19:26:56,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:56,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=C 2024-11-20T19:26:56,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:56,625 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120eae8d53b6e2e4e3481ca5f8f85289454_2d2917374eb6d0879b57e7f84a3c009e is 50, key is test_row_0/A:col10/1732130815475/Put/seqid=0 2024-11-20T19:26:56,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742283_1459 (size=14994) 2024-11-20T19:26:56,646 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:56,658 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120eae8d53b6e2e4e3481ca5f8f85289454_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120eae8d53b6e2e4e3481ca5f8f85289454_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:56,658 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/277cb1e40a33401db93abb843ef3d8c9, store: [table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:56,659 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/277cb1e40a33401db93abb843ef3d8c9 is 175, key is test_row_0/A:col10/1732130815475/Put/seqid=0 2024-11-20T19:26:56,663 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130876654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:56,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130876654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:56,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130876655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:56,665 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130876655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:56,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742284_1460 (size=39949) 2024-11-20T19:26:56,696 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:56,696 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T19:26:56,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:56,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:56,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:56,697 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:56,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:56,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:56,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130876765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:56,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130876766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:56,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130876766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:56,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130876766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:56,850 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:56,852 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T19:26:56,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:56,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:56,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:56,852 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:56,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:56,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:56,979 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130876973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:56,982 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130876979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:56,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130876979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:56,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130876979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:57,005 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:57,006 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T19:26:57,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:57,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:57,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:57,006 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,086 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=295, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/277cb1e40a33401db93abb843ef3d8c9 2024-11-20T19:26:57,095 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/728f27d7a91549609fbff7f8098fa7b7 is 50, key is test_row_0/B:col10/1732130815475/Put/seqid=0 2024-11-20T19:26:57,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742285_1461 (size=12301) 2024-11-20T19:26:57,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T19:26:57,158 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:57,158 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T19:26:57,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:57,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:57,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:57,158 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:57,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130877283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:57,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:57,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130877284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:57,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:57,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130877286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:57,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:57,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130877286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:57,310 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:57,310 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T19:26:57,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:57,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:57,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:57,311 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:57,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,462 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:57,462 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T19:26:57,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:57,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:57,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:57,463 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:57,498 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/728f27d7a91549609fbff7f8098fa7b7 2024-11-20T19:26:57,504 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/453ca8813b3a49daab6249f276a98efa is 50, key is test_row_0/C:col10/1732130815475/Put/seqid=0 2024-11-20T19:26:57,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742286_1462 (size=12301) 2024-11-20T19:26:57,614 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:57,615 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T19:26:57,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:57,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:57,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:57,615 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:57,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,767 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:57,767 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T19:26:57,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:57,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:57,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:57,768 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:57,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130877788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:57,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:57,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130877789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:57,793 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:57,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130877790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:57,793 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:57,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130877790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:57,908 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/453ca8813b3a49daab6249f276a98efa 2024-11-20T19:26:57,911 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/277cb1e40a33401db93abb843ef3d8c9 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/277cb1e40a33401db93abb843ef3d8c9 2024-11-20T19:26:57,913 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/277cb1e40a33401db93abb843ef3d8c9, entries=200, sequenceid=295, filesize=39.0 K 2024-11-20T19:26:57,914 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/728f27d7a91549609fbff7f8098fa7b7 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/728f27d7a91549609fbff7f8098fa7b7 2024-11-20T19:26:57,916 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/728f27d7a91549609fbff7f8098fa7b7, entries=150, sequenceid=295, filesize=12.0 K 2024-11-20T19:26:57,916 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/453ca8813b3a49daab6249f276a98efa as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/453ca8813b3a49daab6249f276a98efa 2024-11-20T19:26:57,919 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/453ca8813b3a49daab6249f276a98efa, entries=150, sequenceid=295, filesize=12.0 K 2024-11-20T19:26:57,919 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:57,919 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T19:26:57,920 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 2d2917374eb6d0879b57e7f84a3c009e in 1309ms, sequenceid=295, compaction requested=true 2024-11-20T19:26:57,920 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:57,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:57,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d2917374eb6d0879b57e7f84a3c009e:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:57,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:57,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d2917374eb6d0879b57e7f84a3c009e:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:57,920 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing 2d2917374eb6d0879b57e7f84a3c009e 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T19:26:57,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:57,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d2917374eb6d0879b57e7f84a3c009e:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:57,920 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:57,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:57,920 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:57,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=A 2024-11-20T19:26:57,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:57,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=B 2024-11-20T19:26:57,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:57,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=C 2024-11-20T19:26:57,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:57,920 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111617 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:57,920 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:57,920 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 2d2917374eb6d0879b57e7f84a3c009e/B is initiating minor compaction (all files) 2024-11-20T19:26:57,920 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 2d2917374eb6d0879b57e7f84a3c009e/A is initiating minor compaction (all files) 2024-11-20T19:26:57,921 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d2917374eb6d0879b57e7f84a3c009e/B in TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:57,921 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d2917374eb6d0879b57e7f84a3c009e/A in TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
2024-11-20T19:26:57,921 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/02b768ae0d104159b2fe779933659ebd, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/c350cade0ba743a89e5a0f0da32d683b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/728f27d7a91549609fbff7f8098fa7b7] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp, totalSize=36.5 K 2024-11-20T19:26:57,921 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/5d2f618355f44a0687a3db7d9b59a88a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/bcee91c59e2c494fb3e76b4e20f870e9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/277cb1e40a33401db93abb843ef3d8c9] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp, totalSize=109.0 K 2024-11-20T19:26:57,921 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:57,921 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
files: [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/5d2f618355f44a0687a3db7d9b59a88a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/bcee91c59e2c494fb3e76b4e20f870e9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/277cb1e40a33401db93abb843ef3d8c9] 2024-11-20T19:26:57,921 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 02b768ae0d104159b2fe779933659ebd, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732130814127 2024-11-20T19:26:57,924 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d2f618355f44a0687a3db7d9b59a88a, keycount=150, bloomtype=ROW, size=31.0 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732130814127 2024-11-20T19:26:57,924 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting c350cade0ba743a89e5a0f0da32d683b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732130814799 2024-11-20T19:26:57,924 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting bcee91c59e2c494fb3e76b4e20f870e9, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732130814799 2024-11-20T19:26:57,924 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 728f27d7a91549609fbff7f8098fa7b7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732130815472 2024-11-20T19:26:57,925 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 277cb1e40a33401db93abb843ef3d8c9, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732130815442 2024-11-20T19:26:57,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120723d57aeb54f4d5abccd0ae1b539423f_2d2917374eb6d0879b57e7f84a3c009e is 50, key is test_row_0/A:col10/1732130816654/Put/seqid=0 2024-11-20T19:26:57,937 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:57,938 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d2917374eb6d0879b57e7f84a3c009e#B#compaction#385 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:57,939 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/7370602ba552449a9016178c4b2bcf2f is 50, key is test_row_0/B:col10/1732130815475/Put/seqid=0 2024-11-20T19:26:57,943 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112018c25cba240646cb9da7fc93beb7662e_2d2917374eb6d0879b57e7f84a3c009e store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:57,944 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112018c25cba240646cb9da7fc93beb7662e_2d2917374eb6d0879b57e7f84a3c009e, store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:57,944 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112018c25cba240646cb9da7fc93beb7662e_2d2917374eb6d0879b57e7f84a3c009e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:57,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742287_1463 (size=12454) 2024-11-20T19:26:57,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742288_1464 (size=13017) 2024-11-20T19:26:57,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742289_1465 (size=4469) 2024-11-20T19:26:57,959 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d2917374eb6d0879b57e7f84a3c009e#A#compaction#386 average throughput is 1.11 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:57,959 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/b68536198bf24154ab2fdd4dc5396bda is 175, key is test_row_0/A:col10/1732130815475/Put/seqid=0 2024-11-20T19:26:57,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742290_1466 (size=31971) 2024-11-20T19:26:57,968 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/b68536198bf24154ab2fdd4dc5396bda as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/b68536198bf24154ab2fdd4dc5396bda 2024-11-20T19:26:57,971 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d2917374eb6d0879b57e7f84a3c009e/A of 2d2917374eb6d0879b57e7f84a3c009e into b68536198bf24154ab2fdd4dc5396bda(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:57,971 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:57,971 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., storeName=2d2917374eb6d0879b57e7f84a3c009e/A, priority=13, startTime=1732130817920; duration=0sec 2024-11-20T19:26:57,971 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:57,971 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d2917374eb6d0879b57e7f84a3c009e:A 2024-11-20T19:26:57,971 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:57,972 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:57,972 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 2d2917374eb6d0879b57e7f84a3c009e/C is initiating minor compaction (all files) 2024-11-20T19:26:57,972 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d2917374eb6d0879b57e7f84a3c009e/C in TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
2024-11-20T19:26:57,972 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/8644af4eea1c4361ad4d116dabdb28fb, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/9869671c4f534945811c5b1db4afc28c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/453ca8813b3a49daab6249f276a98efa] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp, totalSize=36.5 K 2024-11-20T19:26:57,972 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8644af4eea1c4361ad4d116dabdb28fb, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732130814127 2024-11-20T19:26:57,973 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9869671c4f534945811c5b1db4afc28c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732130814799 2024-11-20T19:26:57,973 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 453ca8813b3a49daab6249f276a98efa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732130815472 2024-11-20T19:26:57,977 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d2917374eb6d0879b57e7f84a3c009e#C#compaction#387 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:57,977 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/7c67d7628371436d8349024976530828 is 50, key is test_row_0/C:col10/1732130815475/Put/seqid=0 2024-11-20T19:26:57,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742291_1467 (size=13017) 2024-11-20T19:26:58,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:58,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:58,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:58,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:58,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130878346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:58,349 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120723d57aeb54f4d5abccd0ae1b539423f_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120723d57aeb54f4d5abccd0ae1b539423f_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:26:58,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/33110373ae3a4d5b8dd196ca278b0e32, store: [table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:26:58,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/33110373ae3a4d5b8dd196ca278b0e32 is 175, key is test_row_0/A:col10/1732130816654/Put/seqid=0 2024-11-20T19:26:58,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742292_1468 (size=31255) 2024-11-20T19:26:58,354 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] 
mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=315, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/33110373ae3a4d5b8dd196ca278b0e32 2024-11-20T19:26:58,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/13e7885131c04bfa8f46b5d585c1a11a is 50, key is test_row_0/B:col10/1732130816654/Put/seqid=0 2024-11-20T19:26:58,361 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/7370602ba552449a9016178c4b2bcf2f as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/7370602ba552449a9016178c4b2bcf2f 2024-11-20T19:26:58,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742293_1469 (size=12301) 2024-11-20T19:26:58,363 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/13e7885131c04bfa8f46b5d585c1a11a 2024-11-20T19:26:58,366 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d2917374eb6d0879b57e7f84a3c009e/B of 2d2917374eb6d0879b57e7f84a3c009e into 7370602ba552449a9016178c4b2bcf2f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:58,366 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:58,366 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., storeName=2d2917374eb6d0879b57e7f84a3c009e/B, priority=13, startTime=1732130817920; duration=0sec 2024-11-20T19:26:58,366 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:58,366 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d2917374eb6d0879b57e7f84a3c009e:B 2024-11-20T19:26:58,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/91396aace2eb412b8a72761feaca4dad is 50, key is test_row_0/C:col10/1732130816654/Put/seqid=0 2024-11-20T19:26:58,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742294_1470 (size=12301) 2024-11-20T19:26:58,386 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/7c67d7628371436d8349024976530828 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/7c67d7628371436d8349024976530828 2024-11-20T19:26:58,390 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d2917374eb6d0879b57e7f84a3c009e/C of 2d2917374eb6d0879b57e7f84a3c009e into 7c67d7628371436d8349024976530828(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:58,390 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:26:58,390 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., storeName=2d2917374eb6d0879b57e7f84a3c009e/C, priority=13, startTime=1732130817920; duration=0sec 2024-11-20T19:26:58,390 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:58,390 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d2917374eb6d0879b57e7f84a3c009e:C 2024-11-20T19:26:58,452 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:58,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130878450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:58,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:58,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130878653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:58,777 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/91396aace2eb412b8a72761feaca4dad 2024-11-20T19:26:58,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/33110373ae3a4d5b8dd196ca278b0e32 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/33110373ae3a4d5b8dd196ca278b0e32 2024-11-20T19:26:58,783 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/33110373ae3a4d5b8dd196ca278b0e32, entries=150, sequenceid=315, filesize=30.5 K 2024-11-20T19:26:58,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/13e7885131c04bfa8f46b5d585c1a11a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/13e7885131c04bfa8f46b5d585c1a11a 2024-11-20T19:26:58,787 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/13e7885131c04bfa8f46b5d585c1a11a, entries=150, sequenceid=315, filesize=12.0 K 2024-11-20T19:26:58,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/91396aace2eb412b8a72761feaca4dad as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/91396aace2eb412b8a72761feaca4dad
2024-11-20T19:26:58,791 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/91396aace2eb412b8a72761feaca4dad, entries=150, sequenceid=315, filesize=12.0 K
2024-11-20T19:26:58,792 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 2d2917374eb6d0879b57e7f84a3c009e in 871ms, sequenceid=315, compaction requested=false
2024-11-20T19:26:58,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for 2d2917374eb6d0879b57e7f84a3c009e:
2024-11-20T19:26:58,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.
2024-11-20T19:26:58,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113
2024-11-20T19:26:58,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=113
2024-11-20T19:26:58,794 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112
2024-11-20T19:26:58,794 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.7820 sec
2024-11-20T19:26:58,795 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 3.7850 sec
2024-11-20T19:26:58,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 2d2917374eb6d0879b57e7f84a3c009e
2024-11-20T19:26:58,797 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2d2917374eb6d0879b57e7f84a3c009e 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB
2024-11-20T19:26:58,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=A
2024-11-20T19:26:58,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:26:58,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=B
2024-11-20T19:26:58,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:26:58,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK
2d2917374eb6d0879b57e7f84a3c009e, store=C 2024-11-20T19:26:58,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:58,805 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201f1f0c0867db40d1bb74b8a2416c1fe0_2d2917374eb6d0879b57e7f84a3c009e is 50, key is test_row_0/A:col10/1732130818337/Put/seqid=0 2024-11-20T19:26:58,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742295_1471 (size=14994) 2024-11-20T19:26:58,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:58,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130878829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:58,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:58,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130878830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:58,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:58,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130878831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:58,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:58,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130878832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:58,942 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:58,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130878938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:58,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:58,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130878938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:58,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:58,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130878939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:58,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:58,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130878939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:58,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:58,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130878959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:59,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T19:26:59,118 INFO [Thread-1702 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-11-20T19:26:59,119 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:59,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-11-20T19:26:59,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T19:26:59,120 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:59,120 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:59,120 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:59,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:59,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130879143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:59,147 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:59,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130879143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:59,147 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:59,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130879144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:59,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:59,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130879145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:59,220 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:59,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114
2024-11-20T19:26:59,222 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201f1f0c0867db40d1bb74b8a2416c1fe0_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201f1f0c0867db40d1bb74b8a2416c1fe0_2d2917374eb6d0879b57e7f84a3c009e
2024-11-20T19:26:59,223 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/a05c21380b2b481a90cc86c19c9a8f4a, store: [table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e]
2024-11-20T19:26:59,223 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/a05c21380b2b481a90cc86c19c9a8f4a is 175, key is test_row_0/A:col10/1732130818337/Put/seqid=0
2024-11-20T19:26:59,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742296_1472 (size=39949)
2024-11-20T19:26:59,272 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496
2024-11-20T19:26:59,273 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115
2024-11-20T19:26:59,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.
2024-11-20T19:26:59,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:59,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:59,273 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:59,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:59,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:59,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T19:26:59,425 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:59,425 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T19:26:59,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
2024-11-20T19:26:59,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:59,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:59,425 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:59,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:59,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:59,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:59,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130879448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:59,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:59,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130879448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:59,452 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:59,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130879448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:59,455 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:59,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130879451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:59,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:59,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130879464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:59,577 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:59,577 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T19:26:59,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:59,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:59,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:59,577 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:59,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:59,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:59,626 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=335, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/a05c21380b2b481a90cc86c19c9a8f4a 2024-11-20T19:26:59,632 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/1887f5d52ee74356924f95070f559795 is 50, key is test_row_0/B:col10/1732130818337/Put/seqid=0 2024-11-20T19:26:59,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742297_1473 (size=12301) 2024-11-20T19:26:59,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T19:26:59,729 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:59,729 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T19:26:59,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:59,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
as already flushing 2024-11-20T19:26:59,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:59,730 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:59,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:59,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:59,774 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T19:26:59,881 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:59,881 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T19:26:59,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:26:59,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:26:59,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
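The repeated "NOT flushing ... as already flushing" / "Unable to complete flush" cycle above is the master re-dispatching flush procedure pid=115 while the region is still busy with an earlier flush. As a minimal sketch (not part of this log), the same flush is what a client would request through the public Admin API; the table name is taken from the log, the connection setup is assumed.

```java
// Sketch only: requesting a flush of the test table through the public Admin API.
// The table name comes from the log above; configuration/connection setup is assumed.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Submits a flush procedure on the master; if the region is already flushing,
      // the remote callable can fail and be retried, as seen with pid=115 above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```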
2024-11-20T19:26:59,882 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:59,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:59,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:59,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:59,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130879953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:59,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:59,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130879953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:59,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:59,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130879955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:26:59,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:59,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130879959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:00,033 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:00,034 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T19:27:00,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:00,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:27:00,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:00,034 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
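The RegionTooBusyException entries above ("Over memstore limit=512.0 K") come from HRegion.checkResources rejecting writes once the region's memstore exceeds its blocking size, which is the flush size multiplied by the block multiplier. A minimal sketch naming the two settings involved follows; the concrete values are assumptions chosen so the product works out to the 512 K limit seen in this log (these are server-side settings, normally placed in hbase-site.xml, not client-side overrides).

```java
// Sketch only: the two settings that determine when a region rejects writes with
// RegionTooBusyException. Values below are illustrative assumptions; the blocking
// limit is flush size * block multiplier (128 K * 4 = 512 K, matching the log).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush threshold (assumed value)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // blocking multiplier (default)
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("writes block above ~" + blockingLimit + " bytes"); // 524288 = 512 K
  }
}
```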
2024-11-20T19:27:00,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:00,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:00,036 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/1887f5d52ee74356924f95070f559795 2024-11-20T19:27:00,041 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/37bacf6d90424652a2fefc4f2d9daa07 is 50, key is test_row_0/C:col10/1732130818337/Put/seqid=0 2024-11-20T19:27:00,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742298_1474 (size=12301) 2024-11-20T19:27:00,186 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:00,186 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T19:27:00,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:00,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:27:00,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:00,186 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:00,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:00,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:00,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T19:27:00,338 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:00,338 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T19:27:00,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:00,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:27:00,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:00,338 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:00,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:00,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
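Throughout this stretch the test writers keep hitting RegionTooBusyException on their Mutate calls while the flush is in progress. The HBase client normally retries this exception internally; the following is only a sketch of an explicit retry loop, with the row, family, and qualifier loosely based on the test rows visible in the log and everything else assumed.

```java
// Sketch only: a single-row put with an explicit retry on RegionTooBusyException.
// In practice the HBase client retries this exception internally; names below are
// illustrative and only loosely based on the test rows in the log.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 0; ; attempt++) {
        try {
          table.put(put);
          break;                                  // write accepted
        } catch (RegionTooBusyException e) {      // memstore over its blocking limit
          if (attempt >= 5) throw e;              // give up after a few tries
          Thread.sleep(100L << attempt);          // back off while the flush catches up
        }
      }
    }
  }
}
```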
2024-11-20T19:27:00,444 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/37bacf6d90424652a2fefc4f2d9daa07 2024-11-20T19:27:00,448 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/a05c21380b2b481a90cc86c19c9a8f4a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/a05c21380b2b481a90cc86c19c9a8f4a 2024-11-20T19:27:00,451 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/a05c21380b2b481a90cc86c19c9a8f4a, entries=200, sequenceid=335, filesize=39.0 K 2024-11-20T19:27:00,451 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/1887f5d52ee74356924f95070f559795 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/1887f5d52ee74356924f95070f559795 2024-11-20T19:27:00,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,454 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/1887f5d52ee74356924f95070f559795, entries=150, sequenceid=335, filesize=12.0 K 2024-11-20T19:27:00,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/37bacf6d90424652a2fefc4f2d9daa07 as 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/37bacf6d90424652a2fefc4f2d9daa07 2024-11-20T19:27:00,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,457 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,458 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/37bacf6d90424652a2fefc4f2d9daa07, entries=150, sequenceid=335, filesize=12.0 K 2024-11-20T19:27:00,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,459 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 2d2917374eb6d0879b57e7f84a3c009e in 1663ms, sequenceid=335, compaction requested=true 2024-11-20T19:27:00,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:27:00,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d2917374eb6d0879b57e7f84a3c009e:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:00,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:00,459 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:00,459 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:00,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d2917374eb6d0879b57e7f84a3c009e:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:00,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:00,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d2917374eb6d0879b57e7f84a3c009e:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:00,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:00,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,460 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:00,460 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103175 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:00,460 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 2d2917374eb6d0879b57e7f84a3c009e/A is initiating minor compaction (all files) 2024-11-20T19:27:00,460 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 2d2917374eb6d0879b57e7f84a3c009e/B is initiating minor compaction (all files) 2024-11-20T19:27:00,460 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d2917374eb6d0879b57e7f84a3c009e/A in TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:00,460 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d2917374eb6d0879b57e7f84a3c009e/B in TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:00,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,460 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/b68536198bf24154ab2fdd4dc5396bda, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/33110373ae3a4d5b8dd196ca278b0e32, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/a05c21380b2b481a90cc86c19c9a8f4a] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp, totalSize=100.8 K 2024-11-20T19:27:00,460 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/7370602ba552449a9016178c4b2bcf2f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/13e7885131c04bfa8f46b5d585c1a11a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/1887f5d52ee74356924f95070f559795] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp, totalSize=36.7 K 2024-11-20T19:27:00,460 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
2024-11-20T19:27:00,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,460 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. files: [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/b68536198bf24154ab2fdd4dc5396bda, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/33110373ae3a4d5b8dd196ca278b0e32, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/a05c21380b2b481a90cc86c19c9a8f4a] 2024-11-20T19:27:00,460 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 7370602ba552449a9016178c4b2bcf2f, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732130815472 2024-11-20T19:27:00,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,460 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 13e7885131c04bfa8f46b5d585c1a11a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732130816652 2024-11-20T19:27:00,460 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting b68536198bf24154ab2fdd4dc5396bda, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732130815472 2024-11-20T19:27:00,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,461 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 33110373ae3a4d5b8dd196ca278b0e32, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732130816652 2024-11-20T19:27:00,461 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 1887f5d52ee74356924f95070f559795, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732130818327 2024-11-20T19:27:00,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,461 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting a05c21380b2b481a90cc86c19c9a8f4a, keycount=200, 
bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732130818327 2024-11-20T19:27:00,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,468 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d2917374eb6d0879b57e7f84a3c009e#B#compaction#393 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:00,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,469 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/f87b30cc81604c2cb88e0c1c0e95cfc6 is 50, key is test_row_0/B:col10/1732130818337/Put/seqid=0 2024-11-20T19:27:00,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,470 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,472 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,474 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,475 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:27:00,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,482 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411201287eb795a68478c85416d6ca59948f1_2d2917374eb6d0879b57e7f84a3c009e store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:27:00,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,484 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411201287eb795a68478c85416d6ca59948f1_2d2917374eb6d0879b57e7f84a3c009e, store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:27:00,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,484 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201287eb795a68478c85416d6ca59948f1_2d2917374eb6d0879b57e7f84a3c009e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:27:00,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742299_1475 (size=13119) 2024-11-20T19:27:00,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T19:27:00,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,490 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:00,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:00,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742300_1476 (size=4469) 2024-11-20T19:27:00,491 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2d2917374eb6d0879b57e7f84a3c009e 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T19:27:00,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=A 2024-11-20T19:27:00,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:00,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=B 2024-11-20T19:27:00,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:00,491 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T19:27:00,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=C 2024-11-20T19:27:00,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:00,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:00,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:27:00,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:00,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,491 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:00,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:00,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:00,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,495 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,497 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202b89e629d5aa40ea8e06ee8630885dbe_2d2917374eb6d0879b57e7f84a3c009e is 50, key is test_row_0/A:col10/1732130818829/Put/seqid=0 2024-11-20T19:27:00,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742301_1477 (size=12454) 
2024-11-20T19:27:00,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T19:27:00,573 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:00,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130880573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:00,643 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:00,643 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T19:27:00,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:00,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:27:00,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:00,644 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:00,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:00,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:00,679 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:00,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130880674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:00,795 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:00,796 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T19:27:00,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:00,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:27:00,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:00,796 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:00,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:00,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:00,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:00,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130880881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:00,893 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d2917374eb6d0879b57e7f84a3c009e#A#compaction#394 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:00,894 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/1a271edbeb8647268d7af94622447611 is 175, key is test_row_0/A:col10/1732130818337/Put/seqid=0 2024-11-20T19:27:00,897 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/f87b30cc81604c2cb88e0c1c0e95cfc6 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/f87b30cc81604c2cb88e0c1c0e95cfc6 2024-11-20T19:27:00,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742302_1478 (size=32073) 2024-11-20T19:27:00,901 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:00,902 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d2917374eb6d0879b57e7f84a3c009e/B of 2d2917374eb6d0879b57e7f84a3c009e into f87b30cc81604c2cb88e0c1c0e95cfc6(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:00,902 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:27:00,902 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., storeName=2d2917374eb6d0879b57e7f84a3c009e/B, priority=13, startTime=1732130820459; duration=0sec 2024-11-20T19:27:00,902 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:00,902 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d2917374eb6d0879b57e7f84a3c009e:B 2024-11-20T19:27:00,903 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:00,903 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:00,903 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 2d2917374eb6d0879b57e7f84a3c009e/C is initiating minor compaction (all files) 2024-11-20T19:27:00,903 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d2917374eb6d0879b57e7f84a3c009e/C in TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
2024-11-20T19:27:00,904 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/7c67d7628371436d8349024976530828, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/91396aace2eb412b8a72761feaca4dad, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/37bacf6d90424652a2fefc4f2d9daa07] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp, totalSize=36.7 K 2024-11-20T19:27:00,904 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c67d7628371436d8349024976530828, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732130815472 2024-11-20T19:27:00,904 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202b89e629d5aa40ea8e06ee8630885dbe_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202b89e629d5aa40ea8e06ee8630885dbe_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:00,904 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 91396aace2eb412b8a72761feaca4dad, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732130816652 2024-11-20T19:27:00,905 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/6f3f6b0ab50f4022b529d2c44745a873, store: [table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:27:00,905 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 37bacf6d90424652a2fefc4f2d9daa07, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732130818327 2024-11-20T19:27:00,905 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/6f3f6b0ab50f4022b529d2c44745a873 is 175, key is test_row_0/A:col10/1732130818829/Put/seqid=0 2024-11-20T19:27:00,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742303_1479 (size=31255) 2024-11-20T19:27:00,908 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=355, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/6f3f6b0ab50f4022b529d2c44745a873 2024-11-20T19:27:00,912 INFO 
[RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d2917374eb6d0879b57e7f84a3c009e#C#compaction#396 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:00,912 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/53a090d586124115b1e8c210a1cc2f73 is 50, key is test_row_0/C:col10/1732130818337/Put/seqid=0 2024-11-20T19:27:00,913 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/8847c5083fa24f8a924c90d831b28a3a is 50, key is test_row_0/B:col10/1732130818829/Put/seqid=0 2024-11-20T19:27:00,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742304_1480 (size=13119) 2024-11-20T19:27:00,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742305_1481 (size=12301) 2024-11-20T19:27:00,948 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:00,948 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T19:27:00,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:00,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:27:00,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:00,948 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:00,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:00,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:00,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:00,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130880960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:00,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:00,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130880961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:00,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:00,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130880962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:00,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:00,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130880972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:01,100 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:01,100 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T19:27:01,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:01,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:27:01,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:01,100 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:01,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:01,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:01,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:01,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130881186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:01,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T19:27:01,252 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:01,252 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T19:27:01,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
2024-11-20T19:27:01,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:27:01,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:01,253 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:01,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:01,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:01,304 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/1a271edbeb8647268d7af94622447611 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/1a271edbeb8647268d7af94622447611 2024-11-20T19:27:01,307 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d2917374eb6d0879b57e7f84a3c009e/A of 2d2917374eb6d0879b57e7f84a3c009e into 1a271edbeb8647268d7af94622447611(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:01,307 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:27:01,307 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., storeName=2d2917374eb6d0879b57e7f84a3c009e/A, priority=13, startTime=1732130820459; duration=0sec 2024-11-20T19:27:01,307 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:01,307 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d2917374eb6d0879b57e7f84a3c009e:A 2024-11-20T19:27:01,317 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/8847c5083fa24f8a924c90d831b28a3a 2024-11-20T19:27:01,321 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/53a090d586124115b1e8c210a1cc2f73 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/53a090d586124115b1e8c210a1cc2f73 2024-11-20T19:27:01,324 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/d89dc62af532431ca4e6774ea0f3dfc6 is 50, key is test_row_0/C:col10/1732130818829/Put/seqid=0 2024-11-20T19:27:01,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742306_1482 (size=12301) 2024-11-20T19:27:01,327 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d2917374eb6d0879b57e7f84a3c009e/C of 2d2917374eb6d0879b57e7f84a3c009e into 53a090d586124115b1e8c210a1cc2f73(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:01,327 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:27:01,327 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., storeName=2d2917374eb6d0879b57e7f84a3c009e/C, priority=13, startTime=1732130820459; duration=0sec 2024-11-20T19:27:01,327 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:01,327 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d2917374eb6d0879b57e7f84a3c009e:C 2024-11-20T19:27:01,404 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:01,405 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T19:27:01,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:01,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:27:01,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:01,405 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:01,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:01,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:01,557 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:01,557 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T19:27:01,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:01,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:27:01,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:01,557 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:01,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:01,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:01,698 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:01,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130881694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:01,709 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:01,709 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T19:27:01,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:01,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:27:01,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:01,709 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:01,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:01,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:01,727 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/d89dc62af532431ca4e6774ea0f3dfc6 2024-11-20T19:27:01,730 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/6f3f6b0ab50f4022b529d2c44745a873 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/6f3f6b0ab50f4022b529d2c44745a873 2024-11-20T19:27:01,733 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/6f3f6b0ab50f4022b529d2c44745a873, entries=150, sequenceid=355, filesize=30.5 K 2024-11-20T19:27:01,734 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/8847c5083fa24f8a924c90d831b28a3a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/8847c5083fa24f8a924c90d831b28a3a 2024-11-20T19:27:01,738 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/8847c5083fa24f8a924c90d831b28a3a, entries=150, sequenceid=355, filesize=12.0 K 2024-11-20T19:27:01,739 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/d89dc62af532431ca4e6774ea0f3dfc6 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/d89dc62af532431ca4e6774ea0f3dfc6 2024-11-20T19:27:01,742 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/d89dc62af532431ca4e6774ea0f3dfc6, entries=150, sequenceid=355, filesize=12.0 K 2024-11-20T19:27:01,742 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 2d2917374eb6d0879b57e7f84a3c009e in 1251ms, sequenceid=355, compaction requested=false 2024-11-20T19:27:01,743 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:27:01,861 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:01,861 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T19:27:01,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:01,862 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing 2d2917374eb6d0879b57e7f84a3c009e 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T19:27:01,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=A 2024-11-20T19:27:01,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:01,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=B 2024-11-20T19:27:01,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:01,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=C 2024-11-20T19:27:01,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:01,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120376e0c988a3e4d41baeacf4f18151403_2d2917374eb6d0879b57e7f84a3c009e is 50, key is test_row_0/A:col10/1732130820572/Put/seqid=0 2024-11-20T19:27:01,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742307_1483 (size=12454) 2024-11-20T19:27:02,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:02,274 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120376e0c988a3e4d41baeacf4f18151403_2d2917374eb6d0879b57e7f84a3c009e to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120376e0c988a3e4d41baeacf4f18151403_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:02,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/6dd088a9f99347aa8cc5868ea2c191a9, store: [table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:27:02,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/6dd088a9f99347aa8cc5868ea2c191a9 is 175, key is test_row_0/A:col10/1732130820572/Put/seqid=0 2024-11-20T19:27:02,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742308_1484 (size=31255) 2024-11-20T19:27:02,678 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=374, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/6dd088a9f99347aa8cc5868ea2c191a9 2024-11-20T19:27:02,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/ed6c78954ffc42bdb1f46745e4b68bf3 is 50, key is test_row_0/B:col10/1732130820572/Put/seqid=0 2024-11-20T19:27:02,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742309_1485 (size=12301) 2024-11-20T19:27:02,704 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:27:02,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:02,846 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:02,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130882840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:02,951 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:02,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130882947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:02,981 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:02,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48684 deadline: 1732130882975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:02,982 DEBUG [Thread-1696 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4152 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., hostname=db9c3a6c6492,41229,1732130701496, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:27:02,989 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:02,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48716 deadline: 1732130882983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:02,989 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:02,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48744 deadline: 1732130882983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:02,990 DEBUG [Thread-1700 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4158 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., hostname=db9c3a6c6492,41229,1732130701496, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:27:02,990 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:02,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48670 deadline: 1732130882984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:02,990 DEBUG [Thread-1694 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4159 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., hostname=db9c3a6c6492,41229,1732130701496, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:27:02,991 DEBUG [Thread-1692 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4161 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, 
regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e., hostname=db9c3a6c6492,41229,1732130701496, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) 
at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:27:03,097 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/ed6c78954ffc42bdb1f46745e4b68bf3 2024-11-20T19:27:03,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/0db37a697c974c57947c311d7928018a is 50, key is test_row_0/C:col10/1732130820572/Put/seqid=0 2024-11-20T19:27:03,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742310_1486 (size=12301) 2024-11-20T19:27:03,132 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/0db37a697c974c57947c311d7928018a 2024-11-20T19:27:03,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/6dd088a9f99347aa8cc5868ea2c191a9 as 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/6dd088a9f99347aa8cc5868ea2c191a9 2024-11-20T19:27:03,144 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/6dd088a9f99347aa8cc5868ea2c191a9, entries=150, sequenceid=374, filesize=30.5 K 2024-11-20T19:27:03,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/ed6c78954ffc42bdb1f46745e4b68bf3 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/ed6c78954ffc42bdb1f46745e4b68bf3 2024-11-20T19:27:03,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:03,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48694 deadline: 1732130883153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:03,157 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/ed6c78954ffc42bdb1f46745e4b68bf3, entries=150, sequenceid=374, filesize=12.0 K 2024-11-20T19:27:03,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/0db37a697c974c57947c311d7928018a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/0db37a697c974c57947c311d7928018a 2024-11-20T19:27:03,163 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/0db37a697c974c57947c311d7928018a, entries=150, sequenceid=374, filesize=12.0 K 2024-11-20T19:27:03,164 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 2d2917374eb6d0879b57e7f84a3c009e in 1302ms, sequenceid=374, compaction requested=true 2024-11-20T19:27:03,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:27:03,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
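The burst of RegionTooBusyException entries above comes from HRegion.checkResources rejecting puts while the region's memstore sits at its blocking limit (512.0 K in this run); on the client side, RpcRetryingCallerImpl keeps retrying the same mutation with backoff, which is why the log shows "tries=6, retries=16" for row 'test_row_0'. The flush just logged relieves that pressure: the ~29.07 KB store flushes shown for B and C, plus a similar one for A, account for the ~87.22 KB total reported at the end of the flush. A minimal sketch of the configuration knobs involved follows; the concrete values are assumptions for illustration, not the settings this test actually used.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: the server- and client-side settings that govern the
// behaviour logged above. Values are assumed, not taken from this test run.
public class MemstorePressureConfig {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // Server side: writes are rejected (RegionTooBusyException from checkResources)
    // once a region's memstore reaches flush.size * block.multiplier.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);   // assumed: 128 K
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // 128 K * 4 = 512 K limit
    // Client side: RpcRetryingCallerImpl retries the put with backoff, which is
    // why the same mutation appears repeatedly with an increasing "tries" count.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100); // base pause between retries, in ms
    return conf;
  }
}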
2024-11-20T19:27:03,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-11-20T19:27:03,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-11-20T19:27:03,166 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-11-20T19:27:03,166 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 4.0450 sec 2024-11-20T19:27:03,169 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 4.0480 sec 2024-11-20T19:27:03,205 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/1380df6df04d4584aa5f0c0089e205c3, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/eec1fdeefa8746a7899668f9be5f8b08, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/0da5d00675424532a3d043247070e915, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/36012ac0601244cfb3b20020d1dbed60, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/35075c8e4cfd423d9be80191ba0cba8e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/213196d5daa34471942c257e4d6b24ae, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/16dbd6bf78934995a3312222fcd2a3a7, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/2d8d5855c64049b791c8d7eda9cd17d3, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/2086d1545c09469fac7a22f097a2ea9a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/d62bd502ed0b4d74800dcc3f7d299117, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/d65ccc9d27ca473282cf5e55154c7291, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/7683cea3488e4338965d7b125f8187af, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/55b905288c4042d7b8cd3165516145c7, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/364e12e4e0b8487895570cd672af15d7, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/c668c02b46e04fbeb669a4303c438584, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/98ac278051494bd7be91d43d7cd3aadf, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/1f6fd94d0cf3497992e53d39fec642e9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/d327447c569f4c5b9759320928307669, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/5d2f618355f44a0687a3db7d9b59a88a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/bcee91c59e2c494fb3e76b4e20f870e9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/277cb1e40a33401db93abb843ef3d8c9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/b68536198bf24154ab2fdd4dc5396bda, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/33110373ae3a4d5b8dd196ca278b0e32, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/a05c21380b2b481a90cc86c19c9a8f4a] to archive 2024-11-20T19:27:03,207 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
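The per-file "Archived from FileableStoreFile" entries that follow move each compacted store file from the region's data directory to the matching location under archive/. The mapping visible in those lines is simply "same table-relative path, prefixed with archive/"; the snippet below reproduces that layout for one of the files listed above. It illustrates the directory structure seen in the log, not HFileArchiver's internal code.

import org.apache.hadoop.fs.Path;

// Reproduces the data/ -> archive/ path mapping visible in the archiver entries.
// Purely illustrative of the observed layout.
public class ArchivePathExample {
  static Path toArchive(Path rootDir, Path storeFile) {
    // relative becomes e.g. data/default/TestAcidGuarantees/<region>/A/<file>
    String relative = storeFile.toString().substring(rootDir.toString().length() + 1);
    return new Path(rootDir, "archive/" + relative);
  }

  public static void main(String[] args) {
    Path root = new Path("hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203");
    Path src = new Path(root,
        "data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/1380df6df04d4584aa5f0c0089e205c3");
    // Prints .../archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/1380df...
    System.out.println(toArchive(root, src));
  }
}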
2024-11-20T19:27:03,209 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/1380df6df04d4584aa5f0c0089e205c3 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/1380df6df04d4584aa5f0c0089e205c3 2024-11-20T19:27:03,210 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/eec1fdeefa8746a7899668f9be5f8b08 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/eec1fdeefa8746a7899668f9be5f8b08 2024-11-20T19:27:03,211 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/0da5d00675424532a3d043247070e915 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/0da5d00675424532a3d043247070e915 2024-11-20T19:27:03,212 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/36012ac0601244cfb3b20020d1dbed60 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/36012ac0601244cfb3b20020d1dbed60 2024-11-20T19:27:03,214 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/35075c8e4cfd423d9be80191ba0cba8e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/35075c8e4cfd423d9be80191ba0cba8e 2024-11-20T19:27:03,215 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/213196d5daa34471942c257e4d6b24ae to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/213196d5daa34471942c257e4d6b24ae 2024-11-20T19:27:03,217 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/16dbd6bf78934995a3312222fcd2a3a7 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/16dbd6bf78934995a3312222fcd2a3a7 2024-11-20T19:27:03,218 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/2d8d5855c64049b791c8d7eda9cd17d3 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/2d8d5855c64049b791c8d7eda9cd17d3 2024-11-20T19:27:03,219 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/2086d1545c09469fac7a22f097a2ea9a to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/2086d1545c09469fac7a22f097a2ea9a 2024-11-20T19:27:03,221 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/d62bd502ed0b4d74800dcc3f7d299117 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/d62bd502ed0b4d74800dcc3f7d299117 2024-11-20T19:27:03,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T19:27:03,224 INFO [Thread-1702 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-11-20T19:27:03,226 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:03,226 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/d65ccc9d27ca473282cf5e55154c7291 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/d65ccc9d27ca473282cf5e55154c7291 2024-11-20T19:27:03,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 
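The FLUSH operation recorded above (procId 114 completed, then a new FlushTableProcedure stored as pid=116 for the same table) is driven from the client through the Admin API; the master turns each request into a FlushTableProcedure that fans out one FlushRegionProcedure subprocedure per region, as the PEWorker lines show. A minimal sketch of that client call follows; it uses the public HBase Admin API and is not the test tool's exact code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch of the client-side call behind the "Operation: FLUSH" entries
// above. Illustrative only; the test drives this through HBaseAdmin internally.
public class FlushExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a table flush; the master stores a FlushTableProcedure
      // (pid=116 in the log) and schedules one FlushRegionProcedure per region.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}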
2024-11-20T19:27:03,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T19:27:03,227 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:03,227 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:03,227 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:03,228 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/7683cea3488e4338965d7b125f8187af to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/7683cea3488e4338965d7b125f8187af 2024-11-20T19:27:03,230 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/55b905288c4042d7b8cd3165516145c7 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/55b905288c4042d7b8cd3165516145c7 2024-11-20T19:27:03,231 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/364e12e4e0b8487895570cd672af15d7 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/364e12e4e0b8487895570cd672af15d7 2024-11-20T19:27:03,239 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/c668c02b46e04fbeb669a4303c438584 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/c668c02b46e04fbeb669a4303c438584 2024-11-20T19:27:03,242 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/98ac278051494bd7be91d43d7cd3aadf to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/98ac278051494bd7be91d43d7cd3aadf 2024-11-20T19:27:03,243 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/1f6fd94d0cf3497992e53d39fec642e9 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/1f6fd94d0cf3497992e53d39fec642e9 2024-11-20T19:27:03,244 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/d327447c569f4c5b9759320928307669 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/d327447c569f4c5b9759320928307669 2024-11-20T19:27:03,244 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/5d2f618355f44a0687a3db7d9b59a88a to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/5d2f618355f44a0687a3db7d9b59a88a 2024-11-20T19:27:03,247 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/bcee91c59e2c494fb3e76b4e20f870e9 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/bcee91c59e2c494fb3e76b4e20f870e9 2024-11-20T19:27:03,247 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/277cb1e40a33401db93abb843ef3d8c9 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/277cb1e40a33401db93abb843ef3d8c9 2024-11-20T19:27:03,248 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/b68536198bf24154ab2fdd4dc5396bda to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/b68536198bf24154ab2fdd4dc5396bda 2024-11-20T19:27:03,249 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/33110373ae3a4d5b8dd196ca278b0e32 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/33110373ae3a4d5b8dd196ca278b0e32 2024-11-20T19:27:03,250 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/a05c21380b2b481a90cc86c19c9a8f4a to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/a05c21380b2b481a90cc86c19c9a8f4a 2024-11-20T19:27:03,254 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/1b8e48ab2d6d4f3e9478691e23f70d87, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/becadab61fbc4e05bd15f2c78138f99c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/5eb61774c2df4d8ea31bc3a501127448, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/7b6b06497af643bba08b12970f3841d1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/b48e0738db8e4e6a8a692f11da507306, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/3bc2070f788d40e8b4fa723b1e500467, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/4d63be3616d244eea997bf78656e5626, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/3ed5bc2944d64b4886b7a450b1db23e5, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/e94a06641dbd4c21bfbe3e9f90954214, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/b035ecac4a214b189da2e314851a67a4, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/35a9c2faab8b40feb12c5e0dbae625e8, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/0b7bdef7d7c1473ea3e5aaa402f06943, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/c90c5a27772a4b14b73b9b558d347792, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/90e38d990afb4077b6d75a2e17be0a53, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/835fb0f64c1d454292cc4bf15d9892d8, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/dc5836da34554fb6a110d04600b4b142, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/f111ce7f72f44ed3887ec120cdbc5e89, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/02b768ae0d104159b2fe779933659ebd, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/a0fa089f3701488cbc13e4cd966bb159, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/c350cade0ba743a89e5a0f0da32d683b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/7370602ba552449a9016178c4b2bcf2f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/728f27d7a91549609fbff7f8098fa7b7, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/13e7885131c04bfa8f46b5d585c1a11a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/1887f5d52ee74356924f95070f559795] to archive 2024-11-20T19:27:03,255 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T19:27:03,256 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/1b8e48ab2d6d4f3e9478691e23f70d87 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/1b8e48ab2d6d4f3e9478691e23f70d87 2024-11-20T19:27:03,257 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/becadab61fbc4e05bd15f2c78138f99c to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/becadab61fbc4e05bd15f2c78138f99c 2024-11-20T19:27:03,258 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/5eb61774c2df4d8ea31bc3a501127448 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/5eb61774c2df4d8ea31bc3a501127448 2024-11-20T19:27:03,259 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/7b6b06497af643bba08b12970f3841d1 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/7b6b06497af643bba08b12970f3841d1 2024-11-20T19:27:03,259 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/b48e0738db8e4e6a8a692f11da507306 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/b48e0738db8e4e6a8a692f11da507306 2024-11-20T19:27:03,260 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/3bc2070f788d40e8b4fa723b1e500467 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/3bc2070f788d40e8b4fa723b1e500467 2024-11-20T19:27:03,261 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/4d63be3616d244eea997bf78656e5626 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/4d63be3616d244eea997bf78656e5626 2024-11-20T19:27:03,262 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/3ed5bc2944d64b4886b7a450b1db23e5 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/3ed5bc2944d64b4886b7a450b1db23e5 2024-11-20T19:27:03,270 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/e94a06641dbd4c21bfbe3e9f90954214 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/e94a06641dbd4c21bfbe3e9f90954214 2024-11-20T19:27:03,287 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/b035ecac4a214b189da2e314851a67a4 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/b035ecac4a214b189da2e314851a67a4 2024-11-20T19:27:03,288 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/35a9c2faab8b40feb12c5e0dbae625e8 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/35a9c2faab8b40feb12c5e0dbae625e8 2024-11-20T19:27:03,289 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/0b7bdef7d7c1473ea3e5aaa402f06943 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/0b7bdef7d7c1473ea3e5aaa402f06943 2024-11-20T19:27:03,290 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/c90c5a27772a4b14b73b9b558d347792 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/c90c5a27772a4b14b73b9b558d347792 2024-11-20T19:27:03,291 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/90e38d990afb4077b6d75a2e17be0a53 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/90e38d990afb4077b6d75a2e17be0a53 2024-11-20T19:27:03,292 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/835fb0f64c1d454292cc4bf15d9892d8 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/835fb0f64c1d454292cc4bf15d9892d8 2024-11-20T19:27:03,295 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/dc5836da34554fb6a110d04600b4b142 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/dc5836da34554fb6a110d04600b4b142 2024-11-20T19:27:03,298 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/f111ce7f72f44ed3887ec120cdbc5e89 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/f111ce7f72f44ed3887ec120cdbc5e89 2024-11-20T19:27:03,302 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/02b768ae0d104159b2fe779933659ebd to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/02b768ae0d104159b2fe779933659ebd 2024-11-20T19:27:03,303 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/a0fa089f3701488cbc13e4cd966bb159 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/a0fa089f3701488cbc13e4cd966bb159 2024-11-20T19:27:03,304 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/c350cade0ba743a89e5a0f0da32d683b to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/c350cade0ba743a89e5a0f0da32d683b 2024-11-20T19:27:03,307 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/7370602ba552449a9016178c4b2bcf2f to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/7370602ba552449a9016178c4b2bcf2f 2024-11-20T19:27:03,310 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/728f27d7a91549609fbff7f8098fa7b7 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/728f27d7a91549609fbff7f8098fa7b7 2024-11-20T19:27:03,311 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/13e7885131c04bfa8f46b5d585c1a11a to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/13e7885131c04bfa8f46b5d585c1a11a 2024-11-20T19:27:03,318 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/1887f5d52ee74356924f95070f559795 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/1887f5d52ee74356924f95070f559795 2024-11-20T19:27:03,321 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/023327f8b7a343f2a567c0f5dcdbea8b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/c9f99c59da934351ac37f031f416cf05, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/427f1031e65d49f3ae1f24fcef32f122, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/055b1c1c07034169822324e8fdf94fce, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/1b276b62c2ff4f44a835bd42d0187f17, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/40b37fae8b014c3e854fe69010067b8a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/ba045a0c5177444ca1c6b6b1c6e7edc7, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/92fe7c9da68044c4b974cb06f8fb3ab9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/5a0e671b0ad544fe9a701976e1243847, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/5c6cb4832a34409498256571d50b040c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/688adba07a3d4e15b1e3623ef0d136b2, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/7a66bd9ce4ea49efb1760d135a61e5bb, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/24cbbc025b3e456da054ce4085a1bab8, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/625208d64160451396281d5ba19f0d18, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/06091869e2264f8caf9300b4289eab3c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/fc6d37936bc24f098e0f5a34cee5b7ab, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/a8d6fa740fc34af6baa171c1ea0eae6e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/8644af4eea1c4361ad4d116dabdb28fb, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/51db0dc87b7d424585578bd7134320eb, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/9869671c4f534945811c5b1db4afc28c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/7c67d7628371436d8349024976530828, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/453ca8813b3a49daab6249f276a98efa, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/91396aace2eb412b8a72761feaca4dad, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/37bacf6d90424652a2fefc4f2d9daa07] to archive 2024-11-20T19:27:03,323 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T19:27:03,325 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/023327f8b7a343f2a567c0f5dcdbea8b to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/023327f8b7a343f2a567c0f5dcdbea8b 2024-11-20T19:27:03,326 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/c9f99c59da934351ac37f031f416cf05 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/c9f99c59da934351ac37f031f416cf05 2024-11-20T19:27:03,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T19:27:03,330 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/427f1031e65d49f3ae1f24fcef32f122 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/427f1031e65d49f3ae1f24fcef32f122 2024-11-20T19:27:03,333 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/055b1c1c07034169822324e8fdf94fce to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/055b1c1c07034169822324e8fdf94fce 2024-11-20T19:27:03,334 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/1b276b62c2ff4f44a835bd42d0187f17 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/1b276b62c2ff4f44a835bd42d0187f17 2024-11-20T19:27:03,335 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/40b37fae8b014c3e854fe69010067b8a to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/40b37fae8b014c3e854fe69010067b8a 2024-11-20T19:27:03,341 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/ba045a0c5177444ca1c6b6b1c6e7edc7 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/ba045a0c5177444ca1c6b6b1c6e7edc7 2024-11-20T19:27:03,341 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/92fe7c9da68044c4b974cb06f8fb3ab9 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/92fe7c9da68044c4b974cb06f8fb3ab9 2024-11-20T19:27:03,342 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/5a0e671b0ad544fe9a701976e1243847 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/5a0e671b0ad544fe9a701976e1243847 2024-11-20T19:27:03,345 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/5c6cb4832a34409498256571d50b040c to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/5c6cb4832a34409498256571d50b040c 2024-11-20T19:27:03,349 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/688adba07a3d4e15b1e3623ef0d136b2 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/688adba07a3d4e15b1e3623ef0d136b2 2024-11-20T19:27:03,350 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/7a66bd9ce4ea49efb1760d135a61e5bb to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/7a66bd9ce4ea49efb1760d135a61e5bb 2024-11-20T19:27:03,355 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/24cbbc025b3e456da054ce4085a1bab8 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/24cbbc025b3e456da054ce4085a1bab8 2024-11-20T19:27:03,359 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/625208d64160451396281d5ba19f0d18 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/625208d64160451396281d5ba19f0d18 2024-11-20T19:27:03,362 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/06091869e2264f8caf9300b4289eab3c to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/06091869e2264f8caf9300b4289eab3c 2024-11-20T19:27:03,364 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/fc6d37936bc24f098e0f5a34cee5b7ab to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/fc6d37936bc24f098e0f5a34cee5b7ab 2024-11-20T19:27:03,365 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/a8d6fa740fc34af6baa171c1ea0eae6e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/a8d6fa740fc34af6baa171c1ea0eae6e 2024-11-20T19:27:03,370 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/8644af4eea1c4361ad4d116dabdb28fb to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/8644af4eea1c4361ad4d116dabdb28fb 2024-11-20T19:27:03,372 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/51db0dc87b7d424585578bd7134320eb to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/51db0dc87b7d424585578bd7134320eb 2024-11-20T19:27:03,375 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/9869671c4f534945811c5b1db4afc28c to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/9869671c4f534945811c5b1db4afc28c 2024-11-20T19:27:03,376 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/7c67d7628371436d8349024976530828 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/7c67d7628371436d8349024976530828 2024-11-20T19:27:03,378 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/453ca8813b3a49daab6249f276a98efa to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/453ca8813b3a49daab6249f276a98efa 2024-11-20T19:27:03,378 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:03,379 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-20T19:27:03,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:03,379 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing 2d2917374eb6d0879b57e7f84a3c009e 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T19:27:03,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=A 2024-11-20T19:27:03,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:03,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=B 2024-11-20T19:27:03,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:03,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=C 2024-11-20T19:27:03,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:03,381 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/91396aace2eb412b8a72761feaca4dad to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/91396aace2eb412b8a72761feaca4dad 2024-11-20T19:27:03,384 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/37bacf6d90424652a2fefc4f2d9daa07 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/37bacf6d90424652a2fefc4f2d9daa07 2024-11-20T19:27:03,397 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e475447d1409428a965009a53b73d6d8_2d2917374eb6d0879b57e7f84a3c009e is 50, key is test_row_0/A:col10/1732130822830/Put/seqid=0 2024-11-20T19:27:03,405 DEBUG [Thread-1709 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x683f8469 to 127.0.0.1:49985 2024-11-20T19:27:03,405 DEBUG [Thread-1709 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:03,406 DEBUG [Thread-1711 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x75e4d3d0 to 127.0.0.1:49985 2024-11-20T19:27:03,406 DEBUG [Thread-1711 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:03,412 DEBUG [Thread-1703 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x167a78b0 to 127.0.0.1:49985 2024-11-20T19:27:03,412 DEBUG [Thread-1705 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5aee939b to 127.0.0.1:49985 2024-11-20T19:27:03,412 DEBUG [Thread-1703 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:03,413 DEBUG [Thread-1705 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:03,415 DEBUG [Thread-1707 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f49665c to 127.0.0.1:49985 2024-11-20T19:27:03,415 DEBUG [Thread-1707 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:03,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742311_1487 (size=12454) 2024-11-20T19:27:03,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:03,431 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e475447d1409428a965009a53b73d6d8_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e475447d1409428a965009a53b73d6d8_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:03,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/ce410a6344b844f9bda0aaf992e58121, store: [table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:27:03,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/ce410a6344b844f9bda0aaf992e58121 is 175, key is test_row_0/A:col10/1732130822830/Put/seqid=0 
2024-11-20T19:27:03,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742312_1488 (size=31255) 2024-11-20T19:27:03,445 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=394, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/ce410a6344b844f9bda0aaf992e58121 2024-11-20T19:27:03,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/19dff7f4676e4f35b66ae4fd21e8ce8f is 50, key is test_row_0/B:col10/1732130822830/Put/seqid=0 2024-11-20T19:27:03,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742313_1489 (size=12301) 2024-11-20T19:27:03,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:03,460 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. as already flushing 2024-11-20T19:27:03,460 DEBUG [Thread-1698 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ec15031 to 127.0.0.1:49985 2024-11-20T19:27:03,460 DEBUG [Thread-1698 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:03,461 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/19dff7f4676e4f35b66ae4fd21e8ce8f 2024-11-20T19:27:03,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/273fe17c643e4941b8ec3d84786fb7cd is 50, key is test_row_0/C:col10/1732130822830/Put/seqid=0 2024-11-20T19:27:03,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742314_1490 (size=12301) 2024-11-20T19:27:03,482 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/273fe17c643e4941b8ec3d84786fb7cd 2024-11-20T19:27:03,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/ce410a6344b844f9bda0aaf992e58121 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/ce410a6344b844f9bda0aaf992e58121 2024-11-20T19:27:03,491 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/ce410a6344b844f9bda0aaf992e58121, entries=150, sequenceid=394, filesize=30.5 K 2024-11-20T19:27:03,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/19dff7f4676e4f35b66ae4fd21e8ce8f as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/19dff7f4676e4f35b66ae4fd21e8ce8f 2024-11-20T19:27:03,497 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/19dff7f4676e4f35b66ae4fd21e8ce8f, entries=150, sequenceid=394, filesize=12.0 K 2024-11-20T19:27:03,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/273fe17c643e4941b8ec3d84786fb7cd as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/273fe17c643e4941b8ec3d84786fb7cd 2024-11-20T19:27:03,504 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/273fe17c643e4941b8ec3d84786fb7cd, entries=150, sequenceid=394, filesize=12.0 K 2024-11-20T19:27:03,505 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=6.71 KB/6870 for 2d2917374eb6d0879b57e7f84a3c009e in 126ms, sequenceid=394, compaction requested=true 2024-11-20T19:27:03,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for 2d2917374eb6d0879b57e7f84a3c009e: 2024-11-20T19:27:03,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
2024-11-20T19:27:03,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-11-20T19:27:03,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-11-20T19:27:03,507 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-11-20T19:27:03,507 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 279 msec 2024-11-20T19:27:03,507 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 281 msec 2024-11-20T19:27:03,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T19:27:03,529 INFO [Thread-1702 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-11-20T19:27:07,000 DEBUG [Thread-1696 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7b6cf8cb to 127.0.0.1:49985 2024-11-20T19:27:07,000 DEBUG [Thread-1696 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:07,002 DEBUG [Thread-1692 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2b976e1a to 127.0.0.1:49985 2024-11-20T19:27:07,002 DEBUG [Thread-1692 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:07,003 DEBUG [Thread-1694 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b82ba2a to 127.0.0.1:49985 2024-11-20T19:27:07,003 DEBUG [Thread-1694 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:07,011 DEBUG [Thread-1700 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3dd5b441 to 127.0.0.1:49985 2024-11-20T19:27:07,011 DEBUG [Thread-1700 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:07,011 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-20T19:27:07,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 55 2024-11-20T19:27:07,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 56 2024-11-20T19:27:07,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 60 2024-11-20T19:27:07,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 80 2024-11-20T19:27:07,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 60 2024-11-20T19:27:07,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T19:27:07,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T19:27:07,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1688 2024-11-20T19:27:07,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5063 rows 2024-11-20T19:27:07,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1690 2024-11-20T19:27:07,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5069 rows 2024-11-20T19:27:07,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1682 2024-11-20T19:27:07,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5045 rows 2024-11-20T19:27:07,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1684 2024-11-20T19:27:07,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5051 rows 2024-11-20T19:27:07,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1706 2024-11-20T19:27:07,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5117 rows 2024-11-20T19:27:07,012 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T19:27:07,012 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x68ad882f to 127.0.0.1:49985 2024-11-20T19:27:07,012 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:07,016 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T19:27:07,017 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T19:27:07,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:07,020 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130827019"}]},"ts":"1732130827019"} 2024-11-20T19:27:07,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T19:27:07,022 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T19:27:07,058 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T19:27:07,059 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T19:27:07,060 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2d2917374eb6d0879b57e7f84a3c009e, UNASSIGN}] 2024-11-20T19:27:07,061 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=120, ppid=119, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2d2917374eb6d0879b57e7f84a3c009e, UNASSIGN 2024-11-20T19:27:07,061 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=2d2917374eb6d0879b57e7f84a3c009e, regionState=CLOSING, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:07,062 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T19:27:07,062 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; CloseRegionProcedure 2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496}] 2024-11-20T19:27:07,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T19:27:07,213 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:07,214 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] handler.UnassignRegionHandler(124): Close 2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:07,214 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T19:27:07,214 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1681): Closing 2d2917374eb6d0879b57e7f84a3c009e, disabling compactions & flushes 2024-11-20T19:27:07,214 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:07,214 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 2024-11-20T19:27:07,214 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. after waiting 0 ms 2024-11-20T19:27:07,214 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e. 
2024-11-20T19:27:07,214 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(2837): Flushing 2d2917374eb6d0879b57e7f84a3c009e 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T19:27:07,214 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=A 2024-11-20T19:27:07,214 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:07,214 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=B 2024-11-20T19:27:07,215 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:07,215 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d2917374eb6d0879b57e7f84a3c009e, store=C 2024-11-20T19:27:07,215 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:07,220 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207dfc9078441d4b11a37eda9d47084532_2d2917374eb6d0879b57e7f84a3c009e is 50, key is test_row_0/A:col10/1732130827010/Put/seqid=0 2024-11-20T19:27:07,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742315_1491 (size=9914) 2024-11-20T19:27:07,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T19:27:07,624 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:07,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T19:27:07,628 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207dfc9078441d4b11a37eda9d47084532_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207dfc9078441d4b11a37eda9d47084532_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:07,629 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] mob.DefaultMobStoreFlusher(263): Flush store 
file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/e76d6e8a1a1f43a788fdbecad3ac021f, store: [table=TestAcidGuarantees family=A region=2d2917374eb6d0879b57e7f84a3c009e] 2024-11-20T19:27:07,629 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/e76d6e8a1a1f43a788fdbecad3ac021f is 175, key is test_row_0/A:col10/1732130827010/Put/seqid=0 2024-11-20T19:27:07,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742316_1492 (size=22561) 2024-11-20T19:27:08,033 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=402, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/e76d6e8a1a1f43a788fdbecad3ac021f 2024-11-20T19:27:08,039 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/8f7d7897190c40f7bdaa075c74f968d3 is 50, key is test_row_0/B:col10/1732130827010/Put/seqid=0 2024-11-20T19:27:08,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742317_1493 (size=9857) 2024-11-20T19:27:08,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T19:27:08,442 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=402 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/8f7d7897190c40f7bdaa075c74f968d3 2024-11-20T19:27:08,447 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/0033f26cd3f04e3785eafe27ac554a25 is 50, key is test_row_0/C:col10/1732130827010/Put/seqid=0 2024-11-20T19:27:08,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742318_1494 (size=9857) 2024-11-20T19:27:08,851 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=402 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/0033f26cd3f04e3785eafe27ac554a25 2024-11-20T19:27:08,854 DEBUG 
[RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/A/e76d6e8a1a1f43a788fdbecad3ac021f as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/e76d6e8a1a1f43a788fdbecad3ac021f 2024-11-20T19:27:08,857 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/e76d6e8a1a1f43a788fdbecad3ac021f, entries=100, sequenceid=402, filesize=22.0 K 2024-11-20T19:27:08,858 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/B/8f7d7897190c40f7bdaa075c74f968d3 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/8f7d7897190c40f7bdaa075c74f968d3 2024-11-20T19:27:08,861 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/8f7d7897190c40f7bdaa075c74f968d3, entries=100, sequenceid=402, filesize=9.6 K 2024-11-20T19:27:08,862 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/.tmp/C/0033f26cd3f04e3785eafe27ac554a25 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/0033f26cd3f04e3785eafe27ac554a25 2024-11-20T19:27:08,864 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/0033f26cd3f04e3785eafe27ac554a25, entries=100, sequenceid=402, filesize=9.6 K 2024-11-20T19:27:08,865 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 2d2917374eb6d0879b57e7f84a3c009e in 1651ms, sequenceid=402, compaction requested=true 2024-11-20T19:27:08,880 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/recovered.edits/405.seqid, newMaxSeqId=405, maxSeqId=4 2024-11-20T19:27:08,881 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 
{event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.
2024-11-20T19:27:08,881 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1635): Region close journal for 2d2917374eb6d0879b57e7f84a3c009e:
2024-11-20T19:27:08,882 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] handler.UnassignRegionHandler(170): Closed 2d2917374eb6d0879b57e7f84a3c009e
2024-11-20T19:27:08,883 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=2d2917374eb6d0879b57e7f84a3c009e, regionState=CLOSED
2024-11-20T19:27:08,884 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120
2024-11-20T19:27:08,884 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; CloseRegionProcedure 2d2917374eb6d0879b57e7f84a3c009e, server=db9c3a6c6492,41229,1732130701496 in 1.8210 sec
2024-11-20T19:27:08,887 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=120, resume processing ppid=119
2024-11-20T19:27:08,887 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, ppid=119, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=2d2917374eb6d0879b57e7f84a3c009e, UNASSIGN in 1.8240 sec
2024-11-20T19:27:08,890 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118
2024-11-20T19:27:08,890 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8290 sec
2024-11-20T19:27:08,891 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130828891"}]},"ts":"1732130828891"}
2024-11-20T19:27:08,891 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta
2024-11-20T19:27:08,900 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED
2024-11-20T19:27:08,902 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8840 sec
2024-11-20T19:27:09,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118
2024-11-20T19:27:09,126 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 118 completed
2024-11-20T19:27:09,126 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees
2024-11-20T19:27:09,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees
2024-11-20T19:27:09,128 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=122, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees
2024-11-20T19:27:09,128 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=122,
state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:09,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T19:27:09,129 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:09,131 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A, FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B, FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C, FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/recovered.edits] 2024-11-20T19:27:09,132 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/1a271edbeb8647268d7af94622447611 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/1a271edbeb8647268d7af94622447611 2024-11-20T19:27:09,134 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/6dd088a9f99347aa8cc5868ea2c191a9 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/6dd088a9f99347aa8cc5868ea2c191a9 2024-11-20T19:27:09,135 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/6f3f6b0ab50f4022b529d2c44745a873 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/6f3f6b0ab50f4022b529d2c44745a873 2024-11-20T19:27:09,136 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/ce410a6344b844f9bda0aaf992e58121 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/ce410a6344b844f9bda0aaf992e58121 2024-11-20T19:27:09,136 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/e76d6e8a1a1f43a788fdbecad3ac021f to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/A/e76d6e8a1a1f43a788fdbecad3ac021f 2024-11-20T19:27:09,139 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/19dff7f4676e4f35b66ae4fd21e8ce8f to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/19dff7f4676e4f35b66ae4fd21e8ce8f 2024-11-20T19:27:09,140 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/8847c5083fa24f8a924c90d831b28a3a to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/8847c5083fa24f8a924c90d831b28a3a 2024-11-20T19:27:09,141 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/8f7d7897190c40f7bdaa075c74f968d3 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/8f7d7897190c40f7bdaa075c74f968d3 2024-11-20T19:27:09,142 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/ed6c78954ffc42bdb1f46745e4b68bf3 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/ed6c78954ffc42bdb1f46745e4b68bf3 2024-11-20T19:27:09,144 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/f87b30cc81604c2cb88e0c1c0e95cfc6 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/B/f87b30cc81604c2cb88e0c1c0e95cfc6 2024-11-20T19:27:09,145 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/0033f26cd3f04e3785eafe27ac554a25 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/0033f26cd3f04e3785eafe27ac554a25 2024-11-20T19:27:09,146 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/0db37a697c974c57947c311d7928018a to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/0db37a697c974c57947c311d7928018a 2024-11-20T19:27:09,147 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/273fe17c643e4941b8ec3d84786fb7cd to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/273fe17c643e4941b8ec3d84786fb7cd 2024-11-20T19:27:09,148 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/53a090d586124115b1e8c210a1cc2f73 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/53a090d586124115b1e8c210a1cc2f73 2024-11-20T19:27:09,150 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/d89dc62af532431ca4e6774ea0f3dfc6 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/C/d89dc62af532431ca4e6774ea0f3dfc6 2024-11-20T19:27:09,152 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/recovered.edits/405.seqid to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e/recovered.edits/405.seqid 2024-11-20T19:27:09,152 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:09,152 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T19:27:09,152 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T19:27:09,153 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-20T19:27:09,156 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200ade4a91261744adbd8221474c378884_2d2917374eb6d0879b57e7f84a3c009e to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200ade4a91261744adbd8221474c378884_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:09,157 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200bbda53d21424d7f87cff33db9233e8e_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200bbda53d21424d7f87cff33db9233e8e_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:09,158 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201f1f0c0867db40d1bb74b8a2416c1fe0_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201f1f0c0867db40d1bb74b8a2416c1fe0_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:09,159 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201ff7aa34698a4e8e85bf3503958acbf8_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201ff7aa34698a4e8e85bf3503958acbf8_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:09,159 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202b89e629d5aa40ea8e06ee8630885dbe_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202b89e629d5aa40ea8e06ee8630885dbe_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:09,160 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202f2aac6a4c404783934e8d4a123e7931_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202f2aac6a4c404783934e8d4a123e7931_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:09,161 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203052fba14c3c49a3b05e0d71eb5e89f9_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203052fba14c3c49a3b05e0d71eb5e89f9_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:09,162 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120376e0c988a3e4d41baeacf4f18151403_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120376e0c988a3e4d41baeacf4f18151403_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:09,162 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112049e3106d9508447aa3bf4724ce755b9d_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112049e3106d9508447aa3bf4724ce755b9d_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:09,163 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205607fd21bcb146b4a2f3ccea944f8bbc_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205607fd21bcb146b4a2f3ccea944f8bbc_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:09,164 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120723d57aeb54f4d5abccd0ae1b539423f_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120723d57aeb54f4d5abccd0ae1b539423f_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:09,165 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207dfc9078441d4b11a37eda9d47084532_2d2917374eb6d0879b57e7f84a3c009e to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207dfc9078441d4b11a37eda9d47084532_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:09,165 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120897b28dafc5f462db931c0c42031e2dd_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120897b28dafc5f462db931c0c42031e2dd_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:09,166 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112095fb826c47374ef6a70777641e867e51_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112095fb826c47374ef6a70777641e867e51_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:09,167 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a666e1372fbc4631b1bd512a921d762b_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a666e1372fbc4631b1bd512a921d762b_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:09,168 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120acc1ef23cf5345ad9d65568fa33cf54d_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120acc1ef23cf5345ad9d65568fa33cf54d_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:09,168 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b82db3eebf1b46e881646e2b3bda5b21_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b82db3eebf1b46e881646e2b3bda5b21_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:09,169 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cdc3059c45394e46bd2c0c7892a1e0d4_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cdc3059c45394e46bd2c0c7892a1e0d4_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:09,170 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e475447d1409428a965009a53b73d6d8_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e475447d1409428a965009a53b73d6d8_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:09,171 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ea6933042f24452d9704048251f940e2_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ea6933042f24452d9704048251f940e2_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:09,171 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120eae8d53b6e2e4e3481ca5f8f85289454_2d2917374eb6d0879b57e7f84a3c009e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120eae8d53b6e2e4e3481ca5f8f85289454_2d2917374eb6d0879b57e7f84a3c009e 2024-11-20T19:27:09,172 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T19:27:09,173 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=122, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:09,175 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T19:27:09,176 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T19:27:09,177 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=122, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:09,177 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-11-20T19:27:09,177 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732130829177"}]},"ts":"9223372036854775807"}
2024-11-20T19:27:09,178 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META
2024-11-20T19:27:09,178 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 2d2917374eb6d0879b57e7f84a3c009e, NAME => 'TestAcidGuarantees,,1732130800091.2d2917374eb6d0879b57e7f84a3c009e.', STARTKEY => '', ENDKEY => ''}]
2024-11-20T19:27:09,178 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted.
2024-11-20T19:27:09,178 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732130829178"}]},"ts":"9223372036854775807"}
2024-11-20T19:27:09,180 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META
2024-11-20T19:27:09,189 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=122, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees
2024-11-20T19:27:09,190 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 63 msec
2024-11-20T19:27:09,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122
2024-11-20T19:27:09,229 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 122 completed
2024-11-20T19:27:09,240 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=238 (was 239), OpenFileDescriptor=456 (was 452) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=662 (was 646) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3232 (was 3320)
2024-11-20T19:27:09,249 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=238, OpenFileDescriptor=456, MaxFileDescriptor=1048576, SystemLoadAverage=662, ProcessCount=11, AvailableMemoryMB=3232
2024-11-20T19:27:09,250 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing.
2024-11-20T19:27:09,251 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T19:27:09,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=123, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:09,252 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T19:27:09,252 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:09,252 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 123 2024-11-20T19:27:09,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-11-20T19:27:09,253 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T19:27:09,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742319_1495 (size=960) 2024-11-20T19:27:09,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-11-20T19:27:09,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-11-20T19:27:09,660 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203 2024-11-20T19:27:09,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742320_1496 (size=53) 2024-11-20T19:27:09,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-11-20T19:27:10,065 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:27:10,065 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing ddf3bf4b0d5353d829b30f0de5c7c11a, disabling compactions & flushes 2024-11-20T19:27:10,065 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:10,065 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:10,066 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. after waiting 0 ms 2024-11-20T19:27:10,066 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:10,066 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
2024-11-20T19:27:10,066 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:10,067 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T19:27:10,067 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732130830067"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732130830067"}]},"ts":"1732130830067"} 2024-11-20T19:27:10,068 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T19:27:10,069 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T19:27:10,069 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130830069"}]},"ts":"1732130830069"} 2024-11-20T19:27:10,070 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T19:27:10,092 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ddf3bf4b0d5353d829b30f0de5c7c11a, ASSIGN}] 2024-11-20T19:27:10,093 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ddf3bf4b0d5353d829b30f0de5c7c11a, ASSIGN 2024-11-20T19:27:10,093 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=ddf3bf4b0d5353d829b30f0de5c7c11a, ASSIGN; state=OFFLINE, location=db9c3a6c6492,41229,1732130701496; forceNewPlan=false, retain=false 2024-11-20T19:27:10,244 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=ddf3bf4b0d5353d829b30f0de5c7c11a, regionState=OPENING, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:10,245 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; OpenRegionProcedure ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496}] 2024-11-20T19:27:10,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-11-20T19:27:10,396 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:10,398 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
2024-11-20T19:27:10,398 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(7285): Opening region: {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} 2024-11-20T19:27:10,399 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:10,399 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:27:10,399 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(7327): checking encryption for ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:10,399 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(7330): checking classloading for ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:10,400 INFO [StoreOpener-ddf3bf4b0d5353d829b30f0de5c7c11a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:10,401 INFO [StoreOpener-ddf3bf4b0d5353d829b30f0de5c7c11a-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:27:10,402 INFO [StoreOpener-ddf3bf4b0d5353d829b30f0de5c7c11a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ddf3bf4b0d5353d829b30f0de5c7c11a columnFamilyName A 2024-11-20T19:27:10,402 DEBUG [StoreOpener-ddf3bf4b0d5353d829b30f0de5c7c11a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:10,402 INFO [StoreOpener-ddf3bf4b0d5353d829b30f0de5c7c11a-1 {}] regionserver.HStore(327): Store=ddf3bf4b0d5353d829b30f0de5c7c11a/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:27:10,402 INFO [StoreOpener-ddf3bf4b0d5353d829b30f0de5c7c11a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:10,404 INFO [StoreOpener-ddf3bf4b0d5353d829b30f0de5c7c11a-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:27:10,404 INFO [StoreOpener-ddf3bf4b0d5353d829b30f0de5c7c11a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ddf3bf4b0d5353d829b30f0de5c7c11a columnFamilyName B 2024-11-20T19:27:10,404 DEBUG [StoreOpener-ddf3bf4b0d5353d829b30f0de5c7c11a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:10,405 INFO [StoreOpener-ddf3bf4b0d5353d829b30f0de5c7c11a-1 {}] regionserver.HStore(327): Store=ddf3bf4b0d5353d829b30f0de5c7c11a/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:27:10,405 INFO [StoreOpener-ddf3bf4b0d5353d829b30f0de5c7c11a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:10,407 INFO [StoreOpener-ddf3bf4b0d5353d829b30f0de5c7c11a-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:27:10,407 INFO [StoreOpener-ddf3bf4b0d5353d829b30f0de5c7c11a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ddf3bf4b0d5353d829b30f0de5c7c11a columnFamilyName C 2024-11-20T19:27:10,407 DEBUG [StoreOpener-ddf3bf4b0d5353d829b30f0de5c7c11a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:10,407 INFO [StoreOpener-ddf3bf4b0d5353d829b30f0de5c7c11a-1 {}] regionserver.HStore(327): Store=ddf3bf4b0d5353d829b30f0de5c7c11a/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:27:10,407 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:10,408 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:10,408 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:10,410 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T19:27:10,412 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(1085): writing seq id for ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:10,420 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T19:27:10,421 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(1102): Opened ddf3bf4b0d5353d829b30f0de5c7c11a; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68952641, jitterRate=0.02747441828250885}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T19:27:10,422 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(1001): Region open journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:10,422 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., pid=125, masterSystemTime=1732130830396 2024-11-20T19:27:10,424 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:10,424 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
2024-11-20T19:27:10,424 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=ddf3bf4b0d5353d829b30f0de5c7c11a, regionState=OPEN, openSeqNum=2, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:10,426 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-11-20T19:27:10,426 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; OpenRegionProcedure ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 in 180 msec 2024-11-20T19:27:10,427 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=124, resume processing ppid=123 2024-11-20T19:27:10,428 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, ppid=123, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ddf3bf4b0d5353d829b30f0de5c7c11a, ASSIGN in 334 msec 2024-11-20T19:27:10,428 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T19:27:10,428 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130830428"}]},"ts":"1732130830428"} 2024-11-20T19:27:10,429 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T19:27:10,439 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T19:27:10,440 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1880 sec 2024-11-20T19:27:11,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-11-20T19:27:11,357 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 123 completed 2024-11-20T19:27:11,358 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3fa53591 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3cb726fe 2024-11-20T19:27:11,401 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59bd764a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:11,402 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:11,403 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58952, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:11,404 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T19:27:11,405 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54094, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T19:27:11,406 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3512017b to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@301741f1 2024-11-20T19:27:11,417 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22a6e9f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:11,418 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x695c2253 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@63cefe40 2024-11-20T19:27:11,426 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32c12a30, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:11,426 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7177efc9 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@65df2359 2024-11-20T19:27:11,434 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ef40578, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:11,435 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x61d38088 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7d0ab200 2024-11-20T19:27:11,442 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32bb71c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:11,443 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7043f683 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5871c039 2024-11-20T19:27:11,456 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bc0f7c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:11,457 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7daa5922 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1b8b6e04 2024-11-20T19:27:11,467 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ed69825, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:11,468 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b7f20c4 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5bc486e1 2024-11-20T19:27:11,481 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11193a0c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:11,481 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5f7c40ba to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2070263a 2024-11-20T19:27:11,492 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7861b162, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:11,493 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x41b0e7b6 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6050584c 2024-11-20T19:27:11,509 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@154f0f85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:11,511 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0f2423f3 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6dd48863 2024-11-20T19:27:11,526 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8a917b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:11,543 DEBUG [hconnection-0x33260382-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:11,544 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58968, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:11,549 DEBUG [hconnection-0x4580fb5e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:11,551 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58982, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:11,551 DEBUG [hconnection-0x681951f6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:11,553 INFO [RS-EventLoopGroup-3-2 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58992, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:11,555 DEBUG [hconnection-0x22ac90e2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:11,556 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59000, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:11,592 DEBUG [hconnection-0x14e62074-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:11,592 DEBUG [hconnection-0x3b009a83-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:11,593 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59002, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:11,596 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59004, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:11,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:11,597 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:27:11,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:11,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:11,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:11,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:11,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:11,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:11,613 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:11,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees 2024-11-20T19:27:11,614 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:11,615 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:11,615 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:11,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T19:27:11,635 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/d9ce0b66c5b14e2db8dac624d3399986 is 50, key is test_row_0/A:col10/1732130831597/Put/seqid=0 2024-11-20T19:27:11,636 DEBUG [hconnection-0x1d07b0e3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:11,638 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59020, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:11,651 DEBUG [hconnection-0x50faf3af-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:11,652 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59036, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:11,665 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:11,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130891665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:11,665 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:11,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130891665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:11,667 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:11,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130891667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:11,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:11,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130891668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:11,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T19:27:11,724 DEBUG [hconnection-0x28043e9f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:11,725 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59050, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:11,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:11,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59050 deadline: 1732130891727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:11,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742321_1497 (size=14341) 2024-11-20T19:27:11,757 DEBUG [hconnection-0x43f0e0f2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:11,758 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59058, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:11,767 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:11,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130891767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:11,767 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:11,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130891767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:11,768 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:11,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130891768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:11,771 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:11,776 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T19:27:11,776 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:11,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130891775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:11,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:11,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:11,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:11,778 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:11,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:11,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:11,835 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:11,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59050 deadline: 1732130891835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:11,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T19:27:11,929 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:11,929 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T19:27:11,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:11,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:11,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:11,930 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:11,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:11,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:11,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:11,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130891968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:11,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:11,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130891969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:11,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:11,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130891970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:11,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:11,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130891978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:12,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59050 deadline: 1732130892036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:12,081 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:12,082 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T19:27:12,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:12,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:12,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:12,082 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:12,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:12,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:12,140 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/d9ce0b66c5b14e2db8dac624d3399986 2024-11-20T19:27:12,191 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/cb28c1a21d044b10b68854097b2ed8eb is 50, key is test_row_0/B:col10/1732130831597/Put/seqid=0 2024-11-20T19:27:12,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T19:27:12,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742322_1498 (size=12001) 2024-11-20T19:27:12,234 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/cb28c1a21d044b10b68854097b2ed8eb 2024-11-20T19:27:12,234 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:12,235 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T19:27:12,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): 
Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:12,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:12,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:12,235 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:12,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:12,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:12,267 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/7697a16a7ad2472386b41ced7dec6872 is 50, key is test_row_0/C:col10/1732130831597/Put/seqid=0 2024-11-20T19:27:12,270 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130892269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:12,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130892272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:12,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742323_1499 (size=12001) 2024-11-20T19:27:12,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130892274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:12,286 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130892281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:12,343 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59050 deadline: 1732130892341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:12,386 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:12,387 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T19:27:12,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:12,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:12,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:12,387 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:12,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:12,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:12,540 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:12,540 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T19:27:12,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:12,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:12,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:12,540 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:12,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:12,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:12,681 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/7697a16a7ad2472386b41ced7dec6872 2024-11-20T19:27:12,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/d9ce0b66c5b14e2db8dac624d3399986 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/d9ce0b66c5b14e2db8dac624d3399986 2024-11-20T19:27:12,692 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:12,693 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T19:27:12,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:12,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:12,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:12,693 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:12,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:12,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:12,697 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/d9ce0b66c5b14e2db8dac624d3399986, entries=200, sequenceid=13, filesize=14.0 K 2024-11-20T19:27:12,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/cb28c1a21d044b10b68854097b2ed8eb as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/cb28c1a21d044b10b68854097b2ed8eb 2024-11-20T19:27:12,705 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/cb28c1a21d044b10b68854097b2ed8eb, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T19:27:12,708 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/7697a16a7ad2472386b41ced7dec6872 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/7697a16a7ad2472386b41ced7dec6872 2024-11-20T19:27:12,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T19:27:12,721 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/7697a16a7ad2472386b41ced7dec6872, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T19:27:12,722 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for ddf3bf4b0d5353d829b30f0de5c7c11a in 1125ms, sequenceid=13, compaction requested=false 2024-11-20T19:27:12,722 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-20T19:27:12,723 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:12,778 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T19:27:12,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:12,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:12,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 
2024-11-20T19:27:12,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:12,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:12,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:12,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:12,804 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/1b1c10a55489451aa830442e0def28f7 is 50, key is test_row_0/A:col10/1732130831663/Put/seqid=0 2024-11-20T19:27:12,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742324_1500 (size=14341) 2024-11-20T19:27:12,825 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/1b1c10a55489451aa830442e0def28f7 2024-11-20T19:27:12,833 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/8c441eb4c5404a119bd08109ef6b47a4 is 50, key is test_row_0/B:col10/1732130831663/Put/seqid=0 2024-11-20T19:27:12,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742325_1501 (size=12001) 2024-11-20T19:27:12,842 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/8c441eb4c5404a119bd08109ef6b47a4 2024-11-20T19:27:12,845 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:12,845 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T19:27:12,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:12,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:12,846 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130892795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:12,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:12,846 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:12,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:12,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:12,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130892846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:12,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130892846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:12,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130892850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:12,857 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59050 deadline: 1732130892850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:12,864 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/81ddbc842b064777a4ef0cfc3c924c2e is 50, key is test_row_0/C:col10/1732130831663/Put/seqid=0 2024-11-20T19:27:12,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742326_1502 (size=12001) 2024-11-20T19:27:12,911 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/81ddbc842b064777a4ef0cfc3c924c2e 2024-11-20T19:27:12,921 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/1b1c10a55489451aa830442e0def28f7 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/1b1c10a55489451aa830442e0def28f7 2024-11-20T19:27:12,926 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/1b1c10a55489451aa830442e0def28f7, entries=200, sequenceid=40, filesize=14.0 K 2024-11-20T19:27:12,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/8c441eb4c5404a119bd08109ef6b47a4 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/8c441eb4c5404a119bd08109ef6b47a4 2024-11-20T19:27:12,932 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/8c441eb4c5404a119bd08109ef6b47a4, entries=150, sequenceid=40, filesize=11.7 K 2024-11-20T19:27:12,935 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/81ddbc842b064777a4ef0cfc3c924c2e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/81ddbc842b064777a4ef0cfc3c924c2e 2024-11-20T19:27:12,940 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/81ddbc842b064777a4ef0cfc3c924c2e, entries=150, sequenceid=40, filesize=11.7 K 2024-11-20T19:27:12,940 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=53.67 KB/54960 for ddf3bf4b0d5353d829b30f0de5c7c11a in 162ms, sequenceid=40, compaction requested=false 2024-11-20T19:27:12,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:12,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:12,952 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T19:27:12,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:12,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:12,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:12,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:12,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:12,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:12,959 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/586aa0ba38c94ce8a51abfbc35b68c0e is 50, key is test_row_0/A:col10/1732130832949/Put/seqid=0 2024-11-20T19:27:12,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742327_1503 (size=14341) 2024-11-20T19:27:12,985 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=52 (bloomFilter=true), 
to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/586aa0ba38c94ce8a51abfbc35b68c0e 2024-11-20T19:27:13,000 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,000 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T19:27:13,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:13,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:13,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:13,001 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:13,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:13,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:13,004 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/3e1f430fe67649519f7d3cadda9d9c08 is 50, key is test_row_0/B:col10/1732130832949/Put/seqid=0 2024-11-20T19:27:13,018 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130893010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,019 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130893011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742328_1504 (size=12001) 2024-11-20T19:27:13,025 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/3e1f430fe67649519f7d3cadda9d9c08 2024-11-20T19:27:13,028 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130893018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,029 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130893018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,036 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/16d74abb7e8c4d3db2a982c48b9427c3 is 50, key is test_row_0/C:col10/1732130832949/Put/seqid=0 2024-11-20T19:27:13,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742329_1505 (size=12001) 2024-11-20T19:27:13,058 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/16d74abb7e8c4d3db2a982c48b9427c3 2024-11-20T19:27:13,061 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/586aa0ba38c94ce8a51abfbc35b68c0e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/586aa0ba38c94ce8a51abfbc35b68c0e 2024-11-20T19:27:13,068 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/586aa0ba38c94ce8a51abfbc35b68c0e, entries=200, sequenceid=52, filesize=14.0 K 2024-11-20T19:27:13,070 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/3e1f430fe67649519f7d3cadda9d9c08 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/3e1f430fe67649519f7d3cadda9d9c08 2024-11-20T19:27:13,074 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/3e1f430fe67649519f7d3cadda9d9c08, entries=150, sequenceid=52, filesize=11.7 K 2024-11-20T19:27:13,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/16d74abb7e8c4d3db2a982c48b9427c3 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/16d74abb7e8c4d3db2a982c48b9427c3 2024-11-20T19:27:13,084 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/16d74abb7e8c4d3db2a982c48b9427c3, entries=150, sequenceid=52, filesize=11.7 K 2024-11-20T19:27:13,085 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for ddf3bf4b0d5353d829b30f0de5c7c11a in 133ms, sequenceid=52, compaction requested=true 2024-11-20T19:27:13,085 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:13,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:13,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:13,085 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:13,085 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:13,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:13,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:13,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:13,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:13,087 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:13,087 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/B is initiating minor compaction (all files) 2024-11-20T19:27:13,087 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/B in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
2024-11-20T19:27:13,087 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/cb28c1a21d044b10b68854097b2ed8eb, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/8c441eb4c5404a119bd08109ef6b47a4, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/3e1f430fe67649519f7d3cadda9d9c08] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=35.2 K 2024-11-20T19:27:13,088 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 43023 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:13,088 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/A is initiating minor compaction (all files) 2024-11-20T19:27:13,088 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/A in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:13,088 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/d9ce0b66c5b14e2db8dac624d3399986, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/1b1c10a55489451aa830442e0def28f7, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/586aa0ba38c94ce8a51abfbc35b68c0e] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=42.0 K 2024-11-20T19:27:13,088 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting cb28c1a21d044b10b68854097b2ed8eb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732130831555 2024-11-20T19:27:13,090 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9ce0b66c5b14e2db8dac624d3399986, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732130831555 2024-11-20T19:27:13,090 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c441eb4c5404a119bd08109ef6b47a4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732130831663 2024-11-20T19:27:13,090 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b1c10a55489451aa830442e0def28f7, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732130831663 2024-11-20T19:27:13,090 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 3e1f430fe67649519f7d3cadda9d9c08, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732130832800 2024-11-20T19:27:13,090 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 586aa0ba38c94ce8a51abfbc35b68c0e, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732130832793 2024-11-20T19:27:13,104 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#B#compaction#417 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:13,105 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/0775e61840614dfba4bbd29e4fdc8644 is 50, key is test_row_0/B:col10/1732130832949/Put/seqid=0 2024-11-20T19:27:13,119 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#A#compaction#418 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:13,120 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/ceed9f84ed9f4208a9b27e5bea685dc6 is 50, key is test_row_0/A:col10/1732130832949/Put/seqid=0 2024-11-20T19:27:13,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:13,128 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:27:13,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:13,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:13,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:13,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:13,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:13,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:13,148 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/27eb932721674e9fa8c93c31e7a0ec90 is 50, key is test_row_0/A:col10/1732130833125/Put/seqid=0 2024-11-20T19:27:13,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:36171 is added to blk_1073742330_1506 (size=12104) 2024-11-20T19:27:13,152 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,153 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T19:27:13,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:13,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:13,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:13,153 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:13,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:13,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:13,157 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130893144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,161 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130893150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,161 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130893155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,167 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130893157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742331_1507 (size=12104) 2024-11-20T19:27:13,205 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/ceed9f84ed9f4208a9b27e5bea685dc6 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/ceed9f84ed9f4208a9b27e5bea685dc6 2024-11-20T19:27:13,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742332_1508 (size=14341) 2024-11-20T19:27:13,208 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/27eb932721674e9fa8c93c31e7a0ec90 2024-11-20T19:27:13,219 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/A of ddf3bf4b0d5353d829b30f0de5c7c11a into ceed9f84ed9f4208a9b27e5bea685dc6(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:13,219 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:13,219 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/A, priority=13, startTime=1732130833085; duration=0sec 2024-11-20T19:27:13,219 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:13,219 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:A 2024-11-20T19:27:13,219 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:13,220 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:13,220 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/C is initiating minor compaction (all files) 2024-11-20T19:27:13,220 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/C in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:13,220 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/7697a16a7ad2472386b41ced7dec6872, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/81ddbc842b064777a4ef0cfc3c924c2e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/16d74abb7e8c4d3db2a982c48b9427c3] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=35.2 K 2024-11-20T19:27:13,221 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7697a16a7ad2472386b41ced7dec6872, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732130831555 2024-11-20T19:27:13,221 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 81ddbc842b064777a4ef0cfc3c924c2e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732130831663 2024-11-20T19:27:13,221 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 16d74abb7e8c4d3db2a982c48b9427c3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732130832800 2024-11-20T19:27:13,224 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/ba2a3bfde5e14681ae0112d04e0fc492 is 50, key is test_row_0/B:col10/1732130833125/Put/seqid=0 2024-11-20T19:27:13,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742333_1509 (size=12001) 2024-11-20T19:27:13,254 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#C#compaction#421 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:13,255 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/dee60cebf98c4feb8486b35213351a27 is 50, key is test_row_0/C:col10/1732130832949/Put/seqid=0 2024-11-20T19:27:13,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130893258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,269 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130893262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,269 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130893262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,277 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130893269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742334_1510 (size=12104) 2024-11-20T19:27:13,291 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/dee60cebf98c4feb8486b35213351a27 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/dee60cebf98c4feb8486b35213351a27 2024-11-20T19:27:13,302 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/C of ddf3bf4b0d5353d829b30f0de5c7c11a into dee60cebf98c4feb8486b35213351a27(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:13,302 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:13,302 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/C, priority=13, startTime=1732130833085; duration=0sec 2024-11-20T19:27:13,302 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:13,302 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:C 2024-11-20T19:27:13,305 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,305 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T19:27:13,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:13,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:13,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:13,306 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:13,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:13,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:13,459 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,459 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T19:27:13,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:13,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:13,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:13,459 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:13,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:13,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:13,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130893469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,476 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130893472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,476 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130893473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,484 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130893479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,585 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/0775e61840614dfba4bbd29e4fdc8644 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/0775e61840614dfba4bbd29e4fdc8644 2024-11-20T19:27:13,590 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/B of ddf3bf4b0d5353d829b30f0de5c7c11a into 0775e61840614dfba4bbd29e4fdc8644(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:13,590 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:13,590 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/B, priority=13, startTime=1732130833085; duration=0sec 2024-11-20T19:27:13,590 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:13,590 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:B 2024-11-20T19:27:13,611 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,611 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T19:27:13,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
2024-11-20T19:27:13,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:13,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:13,612 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:13,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:13,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:13,649 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/ba2a3bfde5e14681ae0112d04e0fc492 2024-11-20T19:27:13,656 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/58272e5e02cc496ba6d4f0ad77c7f6f8 is 50, key is test_row_0/C:col10/1732130833125/Put/seqid=0 2024-11-20T19:27:13,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742335_1511 (size=12001) 2024-11-20T19:27:13,693 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/58272e5e02cc496ba6d4f0ad77c7f6f8 2024-11-20T19:27:13,700 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/27eb932721674e9fa8c93c31e7a0ec90 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/27eb932721674e9fa8c93c31e7a0ec90 2024-11-20T19:27:13,706 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/27eb932721674e9fa8c93c31e7a0ec90, entries=200, sequenceid=77, filesize=14.0 K 2024-11-20T19:27:13,709 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/ba2a3bfde5e14681ae0112d04e0fc492 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/ba2a3bfde5e14681ae0112d04e0fc492 2024-11-20T19:27:13,717 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/ba2a3bfde5e14681ae0112d04e0fc492, entries=150, sequenceid=77, filesize=11.7 K 2024-11-20T19:27:13,718 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/58272e5e02cc496ba6d4f0ad77c7f6f8 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/58272e5e02cc496ba6d4f0ad77c7f6f8 2024-11-20T19:27:13,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T19:27:13,722 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/58272e5e02cc496ba6d4f0ad77c7f6f8, entries=150, sequenceid=77, filesize=11.7 K 2024-11-20T19:27:13,727 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for ddf3bf4b0d5353d829b30f0de5c7c11a in 599ms, sequenceid=77, compaction requested=false 2024-11-20T19:27:13,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:13,765 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,765 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T19:27:13,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:13,766 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T19:27:13,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:13,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:13,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:13,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:13,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:13,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:13,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/d61d26b481974741bde54b0e813f143f is 50, key is test_row_0/A:col10/1732130833149/Put/seqid=0 2024-11-20T19:27:13,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:13,778 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:13,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742336_1512 (size=12001) 2024-11-20T19:27:13,850 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130893838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,850 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130893842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,852 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130893843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,860 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130893850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,870 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59050 deadline: 1732130893865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130893951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130893952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130893953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:13,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130893962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:14,160 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130894160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:14,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130894160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:14,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130894160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:14,169 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130894168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:14,225 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/d61d26b481974741bde54b0e813f143f 2024-11-20T19:27:14,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/fa843023fdb04876892ac94ab97410fe is 50, key is test_row_0/B:col10/1732130833149/Put/seqid=0 2024-11-20T19:27:14,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742337_1513 (size=12001) 2024-11-20T19:27:14,268 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/fa843023fdb04876892ac94ab97410fe 2024-11-20T19:27:14,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/072607276d44435d8805c55ec9848ba5 is 50, key is test_row_0/C:col10/1732130833149/Put/seqid=0 2024-11-20T19:27:14,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742338_1514 (size=12001) 2024-11-20T19:27:14,296 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/072607276d44435d8805c55ec9848ba5 2024-11-20T19:27:14,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/d61d26b481974741bde54b0e813f143f as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/d61d26b481974741bde54b0e813f143f 2024-11-20T19:27:14,305 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/d61d26b481974741bde54b0e813f143f, entries=150, sequenceid=92, filesize=11.7 K 2024-11-20T19:27:14,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/fa843023fdb04876892ac94ab97410fe as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/fa843023fdb04876892ac94ab97410fe 2024-11-20T19:27:14,312 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/fa843023fdb04876892ac94ab97410fe, entries=150, sequenceid=92, filesize=11.7 K 2024-11-20T19:27:14,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/072607276d44435d8805c55ec9848ba5 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/072607276d44435d8805c55ec9848ba5 2024-11-20T19:27:14,316 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/072607276d44435d8805c55ec9848ba5, entries=150, sequenceid=92, filesize=11.7 K 2024-11-20T19:27:14,317 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for ddf3bf4b0d5353d829b30f0de5c7c11a in 550ms, sequenceid=92, compaction requested=true 2024-11-20T19:27:14,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2538): Flush status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:14,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
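[Editor's note] The run of RegionTooBusyException entries above shows writers being rejected while region ddf3bf4b0d5353d829b30f0de5c7c11a sits over its 512.0 K memstore blocking limit and the flush (pid=127) is still draining it. The HBase client normally retries these rejections on its own; the sketch below is only a minimal, hedged illustration of handling the same exception explicitly from application code. The table name, row, and column family are taken from the rows visible in this log; the backoff values and retry count are assumptions, not anything from the test source.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             // Table, row and family names mirror what appears in this log.
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;               // assumed starting backoff
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    // The client retries internally; RegionTooBusyException only reaches
                    // application code once those retries are exhausted.
                    table.put(put);
                    break;
                } catch (RegionTooBusyException busy) {
                    // Region is over its memstore blocking limit; wait for the flush to catch up.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;             // simple exponential backoff
                }
            }
        }
    }
}
```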
2024-11-20T19:27:14,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=127 2024-11-20T19:27:14,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=127 2024-11-20T19:27:14,320 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-11-20T19:27:14,320 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7040 sec 2024-11-20T19:27:14,322 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees in 2.7080 sec 2024-11-20T19:27:14,420 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T19:27:14,471 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:27:14,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:14,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:14,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:14,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:14,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:14,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:14,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:14,488 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/7b346544fb77426b859d3020cf3bc53f is 50, key is test_row_0/A:col10/1732130834471/Put/seqid=0 2024-11-20T19:27:14,493 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130894482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:14,500 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130894491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:14,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130894494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:14,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130894493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:14,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742339_1515 (size=16681) 2024-11-20T19:27:14,535 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/7b346544fb77426b859d3020cf3bc53f 2024-11-20T19:27:14,565 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/33c5b2ae88894323897e8479667caf9e is 50, key is test_row_0/B:col10/1732130834471/Put/seqid=0 2024-11-20T19:27:14,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130894595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:14,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742340_1516 (size=12001) 2024-11-20T19:27:14,609 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/33c5b2ae88894323897e8479667caf9e 2024-11-20T19:27:14,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130894602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:14,618 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/dc5db976d5944f54ab6834ff40c33d1a is 50, key is test_row_0/C:col10/1732130834471/Put/seqid=0 2024-11-20T19:27:14,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130894615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:14,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130894615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:14,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742341_1517 (size=12001) 2024-11-20T19:27:14,655 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/dc5db976d5944f54ab6834ff40c33d1a 2024-11-20T19:27:14,659 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/7b346544fb77426b859d3020cf3bc53f as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/7b346544fb77426b859d3020cf3bc53f 2024-11-20T19:27:14,666 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/7b346544fb77426b859d3020cf3bc53f, entries=250, sequenceid=119, filesize=16.3 K 2024-11-20T19:27:14,672 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/33c5b2ae88894323897e8479667caf9e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/33c5b2ae88894323897e8479667caf9e 2024-11-20T19:27:14,675 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/33c5b2ae88894323897e8479667caf9e, entries=150, sequenceid=119, filesize=11.7 K 2024-11-20T19:27:14,676 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/dc5db976d5944f54ab6834ff40c33d1a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/dc5db976d5944f54ab6834ff40c33d1a 2024-11-20T19:27:14,682 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/dc5db976d5944f54ab6834ff40c33d1a, entries=150, sequenceid=119, filesize=11.7 K 2024-11-20T19:27:14,683 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for ddf3bf4b0d5353d829b30f0de5c7c11a in 212ms, sequenceid=119, compaction requested=true 2024-11-20T19:27:14,683 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:14,683 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:14,683 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:14,683 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:14,683 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:27:14,683 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:14,683 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:27:14,683 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:14,683 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:14,684 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55127 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:27:14,684 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] 
regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/A is initiating minor compaction (all files) 2024-11-20T19:27:14,684 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:27:14,684 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/A in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:14,684 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/B is initiating minor compaction (all files) 2024-11-20T19:27:14,684 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/B in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:14,685 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/ceed9f84ed9f4208a9b27e5bea685dc6, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/27eb932721674e9fa8c93c31e7a0ec90, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/d61d26b481974741bde54b0e813f143f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/7b346544fb77426b859d3020cf3bc53f] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=53.8 K 2024-11-20T19:27:14,685 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/0775e61840614dfba4bbd29e4fdc8644, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/ba2a3bfde5e14681ae0112d04e0fc492, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/fa843023fdb04876892ac94ab97410fe, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/33c5b2ae88894323897e8479667caf9e] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=47.0 K 2024-11-20T19:27:14,685 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting ceed9f84ed9f4208a9b27e5bea685dc6, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732130832800 2024-11-20T19:27:14,685 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 0775e61840614dfba4bbd29e4fdc8644, keycount=150, bloomtype=ROW, 
size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732130832800 2024-11-20T19:27:14,685 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 27eb932721674e9fa8c93c31e7a0ec90, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732130833009 2024-11-20T19:27:14,685 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting ba2a3bfde5e14681ae0112d04e0fc492, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732130833015 2024-11-20T19:27:14,685 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting d61d26b481974741bde54b0e813f143f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732130833149 2024-11-20T19:27:14,686 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting fa843023fdb04876892ac94ab97410fe, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732130833149 2024-11-20T19:27:14,686 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b346544fb77426b859d3020cf3bc53f, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732130833840 2024-11-20T19:27:14,686 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 33c5b2ae88894323897e8479667caf9e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732130833849 2024-11-20T19:27:14,708 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#A#compaction#429 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:14,708 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/df8a2db4f80343af84f6a3357a8f2e81 is 50, key is test_row_0/A:col10/1732130834471/Put/seqid=0 2024-11-20T19:27:14,721 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#B#compaction#430 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:14,721 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/a784675ce2fa42cfb7ee49b64f952ba6 is 50, key is test_row_0/B:col10/1732130834471/Put/seqid=0 2024-11-20T19:27:14,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742342_1518 (size=12241) 2024-11-20T19:27:14,774 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/df8a2db4f80343af84f6a3357a8f2e81 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/df8a2db4f80343af84f6a3357a8f2e81 2024-11-20T19:27:14,780 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/A of ddf3bf4b0d5353d829b30f0de5c7c11a into df8a2db4f80343af84f6a3357a8f2e81(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:14,780 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:14,780 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/A, priority=12, startTime=1732130834683; duration=0sec 2024-11-20T19:27:14,780 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:14,780 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:A 2024-11-20T19:27:14,780 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:27:14,781 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:27:14,781 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/C is initiating minor compaction (all files) 2024-11-20T19:27:14,781 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/C in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
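[Editor's note] By this point each store of the region holds four HFiles, so ExploringCompactionPolicy selects all four for a minor compaction of A, B and C. For reference, the sketch below drives the same flush-then-compact cycle from the Admin API; it is an illustrative sketch only. The table name comes from this log, and the polling interval is an arbitrary assumption.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionProbeExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees"); // table name from this log
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.flush(table);    // force memstores out to HFiles, as the FlushTableProcedure did above
            admin.compact(table);  // request a minor compaction of the flushed files
            // Poll until the region server reports no compaction in progress.
            while (admin.getCompactionState(table) != CompactionState.NONE) {
                Thread.sleep(500); // assumed polling interval
            }
        }
    }
}
```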
2024-11-20T19:27:14,782 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/dee60cebf98c4feb8486b35213351a27, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/58272e5e02cc496ba6d4f0ad77c7f6f8, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/072607276d44435d8805c55ec9848ba5, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/dc5db976d5944f54ab6834ff40c33d1a] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=47.0 K 2024-11-20T19:27:14,782 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting dee60cebf98c4feb8486b35213351a27, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732130832800 2024-11-20T19:27:14,785 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 58272e5e02cc496ba6d4f0ad77c7f6f8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732130833015 2024-11-20T19:27:14,785 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 072607276d44435d8805c55ec9848ba5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732130833149 2024-11-20T19:27:14,786 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting dc5db976d5944f54ab6834ff40c33d1a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732130833849 2024-11-20T19:27:14,797 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#C#compaction#431 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:14,797 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/125421f46b61444ebd924be91130e944 is 50, key is test_row_0/C:col10/1732130834471/Put/seqid=0 2024-11-20T19:27:14,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742343_1519 (size=12241) 2024-11-20T19:27:14,810 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/a784675ce2fa42cfb7ee49b64f952ba6 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/a784675ce2fa42cfb7ee49b64f952ba6 2024-11-20T19:27:14,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:14,812 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:27:14,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:14,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:14,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:14,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:14,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:14,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:14,818 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/B of ddf3bf4b0d5353d829b30f0de5c7c11a into a784675ce2fa42cfb7ee49b64f952ba6(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
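The "total limit is 50.00 MB/second" figures reported by PressureAwareThroughputController are the compaction write-throughput cap in effect for this run. If that cap needs to be tuned, the usual place is the region server configuration; the key names below are the ones used by the pressure-aware compaction controller in HBase 2.x as far as I know, but they should be verified against the exact version in use, and the values are illustrative only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputTuning {
        // Example only: bound compaction throughput between 20 MB/s and 50 MB/s.
        public static Configuration exampleConf() {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 20L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 50L * 1024 * 1024);
            return conf;
        }
    }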
2024-11-20T19:27:14,818 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:14,818 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/B, priority=12, startTime=1732130834683; duration=0sec 2024-11-20T19:27:14,818 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:14,818 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:B 2024-11-20T19:27:14,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742344_1520 (size=12241) 2024-11-20T19:27:14,828 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/179b5e5c8e8c4e98ba81ffd420bcb9da is 50, key is test_row_0/A:col10/1732130834804/Put/seqid=0 2024-11-20T19:27:14,830 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/125421f46b61444ebd924be91130e944 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/125421f46b61444ebd924be91130e944 2024-11-20T19:27:14,838 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/C of ddf3bf4b0d5353d829b30f0de5c7c11a into 125421f46b61444ebd924be91130e944(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
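The RegionTooBusyException warnings that follow ("Over memstore limit=512.0 K") come from HRegion.checkResources: once a region's memstore grows past its blocking limit (the flush size multiplied by the block multiplier), new mutations are rejected until a flush catches up, and the client retries. The 512 K figure suggests this run deliberately shrinks the flush size to provoke that path. A minimal sketch of the two server-side keys involved, with illustrative values that reproduce a 512 KB blocking limit (not necessarily the values this test used):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
        // Example only: with a 128 KB flush size and a multiplier of 4, writes start failing
        // with RegionTooBusyException once a region's memstore exceeds 512 KB.
        public static Configuration exampleConf() {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            return conf;
        }
    }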
2024-11-20T19:27:14,838 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:14,838 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/C, priority=12, startTime=1732130834683; duration=0sec 2024-11-20T19:27:14,838 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:14,838 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:C 2024-11-20T19:27:14,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742345_1521 (size=14441) 2024-11-20T19:27:14,865 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/179b5e5c8e8c4e98ba81ffd420bcb9da 2024-11-20T19:27:14,884 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130894874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:14,885 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/06178cbe2d024b19928bf2db35297965 is 50, key is test_row_0/B:col10/1732130834804/Put/seqid=0 2024-11-20T19:27:14,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130894876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:14,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130894878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:14,887 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130894880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:14,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742346_1522 (size=12101) 2024-11-20T19:27:14,939 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/06178cbe2d024b19928bf2db35297965 2024-11-20T19:27:14,959 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/2f20d7f4cb9a451b877feb68ff02e255 is 50, key is test_row_0/C:col10/1732130834804/Put/seqid=0 2024-11-20T19:27:14,994 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130894985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:14,994 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130894986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:14,994 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130894987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:14,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130894989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:15,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742347_1523 (size=12101) 2024-11-20T19:27:15,004 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/2f20d7f4cb9a451b877feb68ff02e255 2024-11-20T19:27:15,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/179b5e5c8e8c4e98ba81ffd420bcb9da as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/179b5e5c8e8c4e98ba81ffd420bcb9da 2024-11-20T19:27:15,018 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/179b5e5c8e8c4e98ba81ffd420bcb9da, entries=200, sequenceid=131, filesize=14.1 K 2024-11-20T19:27:15,018 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/06178cbe2d024b19928bf2db35297965 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/06178cbe2d024b19928bf2db35297965 2024-11-20T19:27:15,024 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/06178cbe2d024b19928bf2db35297965, entries=150, sequenceid=131, filesize=11.8 K 2024-11-20T19:27:15,025 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/2f20d7f4cb9a451b877feb68ff02e255 as 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/2f20d7f4cb9a451b877feb68ff02e255 2024-11-20T19:27:15,029 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/2f20d7f4cb9a451b877feb68ff02e255, entries=150, sequenceid=131, filesize=11.8 K 2024-11-20T19:27:15,030 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for ddf3bf4b0d5353d829b30f0de5c7c11a in 218ms, sequenceid=131, compaction requested=false 2024-11-20T19:27:15,030 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:15,198 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T19:27:15,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:15,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:15,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:15,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:15,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:15,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:15,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:15,212 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/eee6d639d19343b287768c53def371ac is 50, key is test_row_0/A:col10/1732130834877/Put/seqid=0 2024-11-20T19:27:15,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742348_1524 (size=14541) 2024-11-20T19:27:15,225 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130895214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:15,225 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130895214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:15,225 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,225 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130895214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:15,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130895217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:15,335 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130895326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:15,335 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130895326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:15,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130895326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:15,338 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130895328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:15,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130895537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:15,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130895537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:15,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130895538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:15,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130895540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:15,617 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/eee6d639d19343b287768c53def371ac 2024-11-20T19:27:15,636 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/8a21055d2cd1405c8570a2d680a57832 is 50, key is test_row_0/B:col10/1732130834877/Put/seqid=0 2024-11-20T19:27:15,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742349_1525 (size=12151) 2024-11-20T19:27:15,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T19:27:15,720 INFO [Thread-2203 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-11-20T19:27:15,721 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:15,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-11-20T19:27:15,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T19:27:15,726 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:15,726 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:15,726 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
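The pid=126/128 entries above are table-flush requests arriving through the Admin API and being carried out on the master as a FlushTableProcedure with a per-region FlushRegionProcedure subprocedure. A minimal client-side sketch of issuing such a flush with the standard HBase client API (the table name matches the test table; everything else is a plain example, not taken from the test harness):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Ask the master to flush every region of the table; on the servers this
                // surfaces as the FlushTableProcedure/FlushRegionProcedure seen in the log.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }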
2024-11-20T19:27:15,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T19:27:15,849 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130895847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:15,850 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130895847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:15,850 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130895848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:15,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130895849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:15,877 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:15,877 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-20T19:27:15,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:15,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:15,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:15,878 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:15,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:15,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:15,894 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59050 deadline: 1732130895889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:15,895 DEBUG [Thread-2199 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4227 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., hostname=db9c3a6c6492,41229,1732130701496, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:27:16,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T19:27:16,030 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:16,030 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-20T19:27:16,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:16,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:16,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:16,030 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:16,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:16,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:16,091 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/8a21055d2cd1405c8570a2d680a57832 2024-11-20T19:27:16,099 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/f76e1e6c1f4645d38634c7aba93cdf2e is 50, key is test_row_0/C:col10/1732130834877/Put/seqid=0 2024-11-20T19:27:16,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742350_1526 (size=12151) 2024-11-20T19:27:16,182 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:16,183 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-20T19:27:16,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:16,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:16,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:16,183 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:16,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:16,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:16,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T19:27:16,336 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:16,336 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-20T19:27:16,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:16,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:16,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:16,336 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:16,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:16,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:16,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130896352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:16,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130896352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:16,360 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130896355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:16,363 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130896357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:16,488 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:16,489 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-20T19:27:16,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:16,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:16,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:16,489 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:16,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:16,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:16,538 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/f76e1e6c1f4645d38634c7aba93cdf2e 2024-11-20T19:27:16,544 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/eee6d639d19343b287768c53def371ac as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/eee6d639d19343b287768c53def371ac 2024-11-20T19:27:16,547 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/eee6d639d19343b287768c53def371ac, entries=200, sequenceid=159, filesize=14.2 K 2024-11-20T19:27:16,548 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/8a21055d2cd1405c8570a2d680a57832 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/8a21055d2cd1405c8570a2d680a57832 2024-11-20T19:27:16,551 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/8a21055d2cd1405c8570a2d680a57832, entries=150, sequenceid=159, filesize=11.9 K 2024-11-20T19:27:16,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/f76e1e6c1f4645d38634c7aba93cdf2e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/f76e1e6c1f4645d38634c7aba93cdf2e 2024-11-20T19:27:16,555 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/f76e1e6c1f4645d38634c7aba93cdf2e, entries=150, sequenceid=159, filesize=11.9 K 2024-11-20T19:27:16,556 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for ddf3bf4b0d5353d829b30f0de5c7c11a in 1358ms, sequenceid=159, compaction requested=true 2024-11-20T19:27:16,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:16,556 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:16,557 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41223 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:16,557 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/A is initiating minor compaction (all files) 2024-11-20T19:27:16,557 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/A in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:16,557 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/df8a2db4f80343af84f6a3357a8f2e81, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/179b5e5c8e8c4e98ba81ffd420bcb9da, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/eee6d639d19343b287768c53def371ac] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=40.3 K 2024-11-20T19:27:16,558 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting df8a2db4f80343af84f6a3357a8f2e81, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732130833849 2024-11-20T19:27:16,558 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 179b5e5c8e8c4e98ba81ffd420bcb9da, keycount=200, bloomtype=ROW, size=14.1 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732130834475 2024-11-20T19:27:16,558 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting eee6d639d19343b287768c53def371ac, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732130834871 2024-11-20T19:27:16,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:16,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:16,563 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:16,563 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 
{}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36493 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:16,563 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/B is initiating minor compaction (all files) 2024-11-20T19:27:16,563 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/B in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:16,563 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/a784675ce2fa42cfb7ee49b64f952ba6, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/06178cbe2d024b19928bf2db35297965, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/8a21055d2cd1405c8570a2d680a57832] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=35.6 K 2024-11-20T19:27:16,564 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting a784675ce2fa42cfb7ee49b64f952ba6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732130833849 2024-11-20T19:27:16,565 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 06178cbe2d024b19928bf2db35297965, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732130834475 2024-11-20T19:27:16,565 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 8a21055d2cd1405c8570a2d680a57832, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732130834877 2024-11-20T19:27:16,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:16,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:16,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:16,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:16,584 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#A#compaction#438 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:16,585 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/e5e25ee6a7954bdfa33f008ec30c5033 is 50, key is test_row_0/A:col10/1732130834877/Put/seqid=0 2024-11-20T19:27:16,596 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#B#compaction#439 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:16,596 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/991fa7573854436b86a4254253c469d8 is 50, key is test_row_0/B:col10/1732130834877/Put/seqid=0 2024-11-20T19:27:16,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742352_1528 (size=12493) 2024-11-20T19:27:16,633 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/991fa7573854436b86a4254253c469d8 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/991fa7573854436b86a4254253c469d8 2024-11-20T19:27:16,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742351_1527 (size=12493) 2024-11-20T19:27:16,640 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/B of ddf3bf4b0d5353d829b30f0de5c7c11a into 991fa7573854436b86a4254253c469d8(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
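Side note, not part of the captured log: the "3 eligible, 16 blocking" figures in the SortedCompactionPolicy entries above reflect the store-file count thresholds that minor compaction selection works against. A minimal Java sketch of setting those two thresholds on an HBase Configuration, assuming the stock key names (hbase.hstore.compactionThreshold, hbase.hstore.blockingStoreFiles) with the default values seen here:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible store files before a minor compaction is selected
    // (matches the "3 eligible" reported by SortedCompactionPolicy above).
    conf.setInt("hbase.hstore.compactionThreshold", 3);
    // Store-file count per store at which updates to the region are blocked until
    // compaction catches up (matches the "16 blocking" figure above).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("compactionThreshold=" + conf.getInt("hbase.hstore.compactionThreshold", -1)
        + ", blockingStoreFiles=" + conf.getInt("hbase.hstore.blockingStoreFiles", -1));
  }
}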
2024-11-20T19:27:16,640 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:16,640 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/B, priority=13, startTime=1732130836562; duration=0sec 2024-11-20T19:27:16,640 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:16,640 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:B 2024-11-20T19:27:16,640 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:16,641 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36493 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:16,641 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/C is initiating minor compaction (all files) 2024-11-20T19:27:16,641 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/C in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:16,641 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/125421f46b61444ebd924be91130e944, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/2f20d7f4cb9a451b877feb68ff02e255, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/f76e1e6c1f4645d38634c7aba93cdf2e] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=35.6 K 2024-11-20T19:27:16,642 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:16,642 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 125421f46b61444ebd924be91130e944, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732130833849 2024-11-20T19:27:16,642 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-20T19:27:16,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
2024-11-20T19:27:16,642 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T19:27:16,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:16,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:16,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:16,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:16,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:16,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:16,644 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f20d7f4cb9a451b877feb68ff02e255, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732130834475 2024-11-20T19:27:16,646 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting f76e1e6c1f4645d38634c7aba93cdf2e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732130834877 2024-11-20T19:27:16,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/475c809f590042138c770b242540da96 is 50, key is test_row_0/A:col10/1732130835209/Put/seqid=0 2024-11-20T19:27:16,650 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/e5e25ee6a7954bdfa33f008ec30c5033 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/e5e25ee6a7954bdfa33f008ec30c5033 2024-11-20T19:27:16,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742353_1529 (size=12151) 2024-11-20T19:27:16,658 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/A of ddf3bf4b0d5353d829b30f0de5c7c11a into e5e25ee6a7954bdfa33f008ec30c5033(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:16,658 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:16,658 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/A, priority=13, startTime=1732130836556; duration=0sec 2024-11-20T19:27:16,659 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:16,659 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:A 2024-11-20T19:27:16,660 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#C#compaction#441 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:16,661 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/5850d9fc82d34b4b8d3727be16909d91 is 50, key is test_row_0/C:col10/1732130834877/Put/seqid=0 2024-11-20T19:27:16,662 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/475c809f590042138c770b242540da96 2024-11-20T19:27:16,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742354_1530 (size=12493) 2024-11-20T19:27:16,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/816dfeef83ed4a8496d2bda268ff5cbf is 50, key is test_row_0/B:col10/1732130835209/Put/seqid=0 2024-11-20T19:27:16,704 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/5850d9fc82d34b4b8d3727be16909d91 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/5850d9fc82d34b4b8d3727be16909d91 2024-11-20T19:27:16,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742355_1531 (size=12151) 2024-11-20T19:27:16,714 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/C of ddf3bf4b0d5353d829b30f0de5c7c11a into 
5850d9fc82d34b4b8d3727be16909d91(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:16,714 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:16,714 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/C, priority=13, startTime=1732130836575; duration=0sec 2024-11-20T19:27:16,714 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:16,714 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:C 2024-11-20T19:27:16,715 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/816dfeef83ed4a8496d2bda268ff5cbf 2024-11-20T19:27:16,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/3f61f03e5cd34bf8a464f2bba102941b is 50, key is test_row_0/C:col10/1732130835209/Put/seqid=0 2024-11-20T19:27:16,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742356_1532 (size=12151) 2024-11-20T19:27:16,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T19:27:17,147 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/3f61f03e5cd34bf8a464f2bba102941b 2024-11-20T19:27:17,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/475c809f590042138c770b242540da96 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/475c809f590042138c770b242540da96 2024-11-20T19:27:17,160 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/475c809f590042138c770b242540da96, 
entries=150, sequenceid=170, filesize=11.9 K 2024-11-20T19:27:17,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/816dfeef83ed4a8496d2bda268ff5cbf as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/816dfeef83ed4a8496d2bda268ff5cbf 2024-11-20T19:27:17,165 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/816dfeef83ed4a8496d2bda268ff5cbf, entries=150, sequenceid=170, filesize=11.9 K 2024-11-20T19:27:17,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/3f61f03e5cd34bf8a464f2bba102941b as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/3f61f03e5cd34bf8a464f2bba102941b 2024-11-20T19:27:17,170 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/3f61f03e5cd34bf8a464f2bba102941b, entries=150, sequenceid=170, filesize=11.9 K 2024-11-20T19:27:17,171 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=0 B/0 for ddf3bf4b0d5353d829b30f0de5c7c11a in 529ms, sequenceid=170, compaction requested=false 2024-11-20T19:27:17,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:17,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
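For context (not from the log itself): the pid=128/pid=129 procedures that finish just below are the kind of table flush a client requests through the Admin API. A minimal sketch of issuing such a request, assuming a default client configuration on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush the table; the master drives a FlushTableProcedure,
      // which fans out FlushRegionProcedure / FlushRegionCallable work to the
      // region servers, as recorded for pid=128/pid=129 in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}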
2024-11-20T19:27:17,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-11-20T19:27:17,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-11-20T19:27:17,176 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-11-20T19:27:17,176 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4480 sec 2024-11-20T19:27:17,177 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 1.4550 sec 2024-11-20T19:27:17,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:17,372 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:27:17,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:17,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:17,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:17,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:17,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:17,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:17,376 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/9540fa4d9165401c9bc0ceb4c7ec8775 is 50, key is test_row_0/A:col10/1732130837364/Put/seqid=0 2024-11-20T19:27:17,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742357_1533 (size=19321) 2024-11-20T19:27:17,413 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=183 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/9540fa4d9165401c9bc0ceb4c7ec8775 2024-11-20T19:27:17,420 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/b7e55ebe355c46ee9e089ccc514f951e is 50, key is test_row_0/B:col10/1732130837364/Put/seqid=0 2024-11-20T19:27:17,428 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130897417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:17,429 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130897418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:17,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130897420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:17,438 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130897430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:17,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742358_1534 (size=12151) 2024-11-20T19:27:17,444 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=183 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/b7e55ebe355c46ee9e089ccc514f951e 2024-11-20T19:27:17,456 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/d9281cfde3af4c9f91928068d9604d6a is 50, key is test_row_0/C:col10/1732130837364/Put/seqid=0 2024-11-20T19:27:17,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742359_1535 (size=12151) 2024-11-20T19:27:17,483 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=183 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/d9281cfde3af4c9f91928068d9604d6a 2024-11-20T19:27:17,486 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/9540fa4d9165401c9bc0ceb4c7ec8775 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/9540fa4d9165401c9bc0ceb4c7ec8775 2024-11-20T19:27:17,493 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/9540fa4d9165401c9bc0ceb4c7ec8775, 
entries=300, sequenceid=183, filesize=18.9 K 2024-11-20T19:27:17,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/b7e55ebe355c46ee9e089ccc514f951e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/b7e55ebe355c46ee9e089ccc514f951e 2024-11-20T19:27:17,501 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/b7e55ebe355c46ee9e089ccc514f951e, entries=150, sequenceid=183, filesize=11.9 K 2024-11-20T19:27:17,503 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/d9281cfde3af4c9f91928068d9604d6a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/d9281cfde3af4c9f91928068d9604d6a 2024-11-20T19:27:17,512 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/d9281cfde3af4c9f91928068d9604d6a, entries=150, sequenceid=183, filesize=11.9 K 2024-11-20T19:27:17,513 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for ddf3bf4b0d5353d829b30f0de5c7c11a in 141ms, sequenceid=183, compaction requested=true 2024-11-20T19:27:17,513 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:17,513 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:17,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:17,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:17,514 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:17,514 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 43965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:17,514 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/A is initiating minor compaction (all files) 2024-11-20T19:27:17,514 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/A in 
TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:17,514 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/e5e25ee6a7954bdfa33f008ec30c5033, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/475c809f590042138c770b242540da96, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/9540fa4d9165401c9bc0ceb4c7ec8775] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=42.9 K 2024-11-20T19:27:17,515 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting e5e25ee6a7954bdfa33f008ec30c5033, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732130834877 2024-11-20T19:27:17,516 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 475c809f590042138c770b242540da96, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732130835209 2024-11-20T19:27:17,516 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9540fa4d9165401c9bc0ceb4c7ec8775, keycount=300, bloomtype=ROW, size=18.9 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1732130837362 2024-11-20T19:27:17,516 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:17,516 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/B is initiating minor compaction (all files) 2024-11-20T19:27:17,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:17,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:17,516 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/B in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
2024-11-20T19:27:17,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:17,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:17,516 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/991fa7573854436b86a4254253c469d8, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/816dfeef83ed4a8496d2bda268ff5cbf, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/b7e55ebe355c46ee9e089ccc514f951e] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=35.9 K 2024-11-20T19:27:17,519 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 991fa7573854436b86a4254253c469d8, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732130834877 2024-11-20T19:27:17,519 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 816dfeef83ed4a8496d2bda268ff5cbf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732130835209 2024-11-20T19:27:17,520 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting b7e55ebe355c46ee9e089ccc514f951e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1732130837364 2024-11-20T19:27:17,539 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T19:27:17,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:17,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:17,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:17,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:17,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:17,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:17,540 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#A#compaction#447 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:17,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:17,540 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/06c04cbfb6724acabd218bb07741e875 is 50, key is test_row_0/A:col10/1732130837364/Put/seqid=0 2024-11-20T19:27:17,556 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#B#compaction#448 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:17,557 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/bf243f2a92dc4ca39b12fdc9339ef93c is 50, key is test_row_0/B:col10/1732130837364/Put/seqid=0 2024-11-20T19:27:17,560 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/789f0ed603524fa5b02ff7a71eec3b6c is 50, key is test_row_0/A:col10/1732130837419/Put/seqid=0 2024-11-20T19:27:17,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130897552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:17,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130897553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:17,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130897555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:17,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742360_1536 (size=12595) 2024-11-20T19:27:17,579 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130897568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:17,585 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/06c04cbfb6724acabd218bb07741e875 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/06c04cbfb6724acabd218bb07741e875 2024-11-20T19:27:17,592 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/A of ddf3bf4b0d5353d829b30f0de5c7c11a into 06c04cbfb6724acabd218bb07741e875(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:17,592 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:17,592 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/A, priority=13, startTime=1732130837513; duration=0sec 2024-11-20T19:27:17,592 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:17,592 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:A 2024-11-20T19:27:17,592 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:17,599 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:17,599 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/C is initiating minor compaction (all files) 2024-11-20T19:27:17,599 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/C in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:17,599 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/5850d9fc82d34b4b8d3727be16909d91, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/3f61f03e5cd34bf8a464f2bba102941b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/d9281cfde3af4c9f91928068d9604d6a] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=35.9 K 2024-11-20T19:27:17,599 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5850d9fc82d34b4b8d3727be16909d91, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732130834877 2024-11-20T19:27:17,600 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3f61f03e5cd34bf8a464f2bba102941b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732130835209 2024-11-20T19:27:17,600 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9281cfde3af4c9f91928068d9604d6a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1732130837364 2024-11-20T19:27:17,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36171 is added to blk_1073742362_1538 (size=14541) 2024-11-20T19:27:17,633 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/789f0ed603524fa5b02ff7a71eec3b6c 2024-11-20T19:27:17,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742361_1537 (size=12595) 2024-11-20T19:27:17,654 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/bf243f2a92dc4ca39b12fdc9339ef93c as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/bf243f2a92dc4ca39b12fdc9339ef93c 2024-11-20T19:27:17,661 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/B of ddf3bf4b0d5353d829b30f0de5c7c11a into bf243f2a92dc4ca39b12fdc9339ef93c(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:17,661 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:17,661 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/B, priority=13, startTime=1732130837514; duration=0sec 2024-11-20T19:27:17,662 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:17,662 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:B 2024-11-20T19:27:17,664 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#C#compaction#450 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:17,665 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/bda324bbfe064e7ca7fd109c1f4d9a3a is 50, key is test_row_0/C:col10/1732130837364/Put/seqid=0 2024-11-20T19:27:17,672 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/1a9f314dac574e43b6083415c0260dc9 is 50, key is test_row_0/B:col10/1732130837419/Put/seqid=0 2024-11-20T19:27:17,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130897669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:17,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130897669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:17,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130897669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:17,687 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130897680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:17,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742363_1539 (size=12595) 2024-11-20T19:27:17,725 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/bda324bbfe064e7ca7fd109c1f4d9a3a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/bda324bbfe064e7ca7fd109c1f4d9a3a 2024-11-20T19:27:17,734 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/C of ddf3bf4b0d5353d829b30f0de5c7c11a into bda324bbfe064e7ca7fd109c1f4d9a3a(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:17,734 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:17,734 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/C, priority=13, startTime=1732130837516; duration=0sec 2024-11-20T19:27:17,735 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:17,735 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:C 2024-11-20T19:27:17,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742364_1540 (size=12151) 2024-11-20T19:27:17,756 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/1a9f314dac574e43b6083415c0260dc9 2024-11-20T19:27:17,779 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/060f02b78ae34287a0b97f4e31d1bb6e is 50, key is test_row_0/C:col10/1732130837419/Put/seqid=0 2024-11-20T19:27:17,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742365_1541 (size=12151) 2024-11-20T19:27:17,805 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/060f02b78ae34287a0b97f4e31d1bb6e 2024-11-20T19:27:17,815 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/789f0ed603524fa5b02ff7a71eec3b6c as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/789f0ed603524fa5b02ff7a71eec3b6c 2024-11-20T19:27:17,825 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/789f0ed603524fa5b02ff7a71eec3b6c, entries=200, sequenceid=209, filesize=14.2 K 2024-11-20T19:27:17,826 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/1a9f314dac574e43b6083415c0260dc9 as 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/1a9f314dac574e43b6083415c0260dc9 2024-11-20T19:27:17,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T19:27:17,830 INFO [Thread-2203 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-11-20T19:27:17,831 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:17,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-11-20T19:27:17,832 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:17,833 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:17,833 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:17,841 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/1a9f314dac574e43b6083415c0260dc9, entries=150, sequenceid=209, filesize=11.9 K 2024-11-20T19:27:17,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T19:27:17,845 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/060f02b78ae34287a0b97f4e31d1bb6e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/060f02b78ae34287a0b97f4e31d1bb6e 2024-11-20T19:27:17,848 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/060f02b78ae34287a0b97f4e31d1bb6e, entries=150, sequenceid=209, filesize=11.9 K 2024-11-20T19:27:17,849 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for ddf3bf4b0d5353d829b30f0de5c7c11a in 310ms, sequenceid=209, compaction requested=false 2024-11-20T19:27:17,849 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:17,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush 
requested on ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:17,882 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:27:17,882 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:17,882 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:17,882 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:17,882 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:17,882 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:17,882 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:17,887 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/024cab535c4b45f48081679b20257094 is 50, key is test_row_0/A:col10/1732130837566/Put/seqid=0 2024-11-20T19:27:17,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742366_1542 (size=14541) 2024-11-20T19:27:17,908 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=223 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/024cab535c4b45f48081679b20257094 2024-11-20T19:27:17,914 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/bf95fc2d27dd469fa71592c7ebf6daba is 50, key is test_row_0/B:col10/1732130837566/Put/seqid=0 2024-11-20T19:27:17,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742367_1543 (size=12151) 2024-11-20T19:27:17,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130897932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:17,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130897934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:17,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T19:27:17,951 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130897938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:17,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130897941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:17,993 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:17,994 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T19:27:17,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:17,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:17,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:17,994 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:17,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:17,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:18,055 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130898045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:18,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130898046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:18,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130898052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:18,060 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130898052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:18,146 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:18,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T19:27:18,146 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T19:27:18,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:18,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:18,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:18,147 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:18,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:18,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:18,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130898257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:18,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130898258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:18,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130898258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:18,268 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130898261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:18,300 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:18,300 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T19:27:18,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:18,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:18,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:18,300 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:18,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:18,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:18,342 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=223 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/bf95fc2d27dd469fa71592c7ebf6daba 2024-11-20T19:27:18,348 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/d983f2920d8549798fc27711cbdbd385 is 50, key is test_row_0/C:col10/1732130837566/Put/seqid=0 2024-11-20T19:27:18,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742368_1544 (size=12151) 2024-11-20T19:27:18,366 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=223 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/d983f2920d8549798fc27711cbdbd385 2024-11-20T19:27:18,370 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/024cab535c4b45f48081679b20257094 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/024cab535c4b45f48081679b20257094 2024-11-20T19:27:18,373 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/024cab535c4b45f48081679b20257094, entries=200, sequenceid=223, filesize=14.2 K 2024-11-20T19:27:18,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/bf95fc2d27dd469fa71592c7ebf6daba as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/bf95fc2d27dd469fa71592c7ebf6daba 2024-11-20T19:27:18,382 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/bf95fc2d27dd469fa71592c7ebf6daba, entries=150, sequenceid=223, filesize=11.9 K 
2024-11-20T19:27:18,383 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/d983f2920d8549798fc27711cbdbd385 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/d983f2920d8549798fc27711cbdbd385 2024-11-20T19:27:18,392 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/d983f2920d8549798fc27711cbdbd385, entries=150, sequenceid=223, filesize=11.9 K 2024-11-20T19:27:18,393 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for ddf3bf4b0d5353d829b30f0de5c7c11a in 511ms, sequenceid=223, compaction requested=true 2024-11-20T19:27:18,393 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:18,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:18,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:18,393 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:18,393 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:18,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:18,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:18,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:18,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:18,395 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41677 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:18,395 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/A is initiating minor compaction (all files) 2024-11-20T19:27:18,395 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/A in 
TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:18,395 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/06c04cbfb6724acabd218bb07741e875, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/789f0ed603524fa5b02ff7a71eec3b6c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/024cab535c4b45f48081679b20257094] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=40.7 K 2024-11-20T19:27:18,395 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06c04cbfb6724acabd218bb07741e875, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1732130837364 2024-11-20T19:27:18,395 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:18,395 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/B is initiating minor compaction (all files) 2024-11-20T19:27:18,395 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/B in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
2024-11-20T19:27:18,396 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/bf243f2a92dc4ca39b12fdc9339ef93c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/1a9f314dac574e43b6083415c0260dc9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/bf95fc2d27dd469fa71592c7ebf6daba] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=36.0 K 2024-11-20T19:27:18,396 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 789f0ed603524fa5b02ff7a71eec3b6c, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732130837402 2024-11-20T19:27:18,396 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting bf243f2a92dc4ca39b12fdc9339ef93c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1732130837364 2024-11-20T19:27:18,396 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 024cab535c4b45f48081679b20257094, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1732130837540 2024-11-20T19:27:18,396 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a9f314dac574e43b6083415c0260dc9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732130837402 2024-11-20T19:27:18,397 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting bf95fc2d27dd469fa71592c7ebf6daba, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1732130837552 2024-11-20T19:27:18,404 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#A#compaction#456 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:18,405 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#B#compaction#457 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:18,405 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/86de3260331e49dea3d245a999d49c5e is 50, key is test_row_0/A:col10/1732130837566/Put/seqid=0 2024-11-20T19:27:18,405 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/c25d3f509f124f4bb6b013b2bf6b8942 is 50, key is test_row_0/B:col10/1732130837566/Put/seqid=0 2024-11-20T19:27:18,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742369_1545 (size=12697) 2024-11-20T19:27:18,438 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/c25d3f509f124f4bb6b013b2bf6b8942 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/c25d3f509f124f4bb6b013b2bf6b8942 2024-11-20T19:27:18,444 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/B of ddf3bf4b0d5353d829b30f0de5c7c11a into c25d3f509f124f4bb6b013b2bf6b8942(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:18,444 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:18,444 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/B, priority=13, startTime=1732130838393; duration=0sec 2024-11-20T19:27:18,444 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:18,444 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:B 2024-11-20T19:27:18,445 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:18,445 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:18,446 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/C is initiating minor compaction (all files) 2024-11-20T19:27:18,446 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/C in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:18,446 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/bda324bbfe064e7ca7fd109c1f4d9a3a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/060f02b78ae34287a0b97f4e31d1bb6e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/d983f2920d8549798fc27711cbdbd385] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=36.0 K 2024-11-20T19:27:18,446 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting bda324bbfe064e7ca7fd109c1f4d9a3a, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1732130837364 2024-11-20T19:27:18,446 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 060f02b78ae34287a0b97f4e31d1bb6e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732130837402 2024-11-20T19:27:18,446 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting d983f2920d8549798fc27711cbdbd385, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1732130837552 2024-11-20T19:27:18,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=130 2024-11-20T19:27:18,452 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:18,453 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T19:27:18,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:18,453 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:27:18,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:18,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:18,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:18,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:18,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:18,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:18,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742370_1546 (size=12697) 2024-11-20T19:27:18,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/13b7ef3a59b942c3846f9cc867fd1e69 is 50, key is test_row_0/A:col10/1732130837937/Put/seqid=0 2024-11-20T19:27:18,465 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/86de3260331e49dea3d245a999d49c5e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/86de3260331e49dea3d245a999d49c5e 2024-11-20T19:27:18,468 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#C#compaction#459 average throughput is 6.55 MB/second, slept 0 time(s) and total slept 
time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:18,469 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/846e2e31566e4f278dd1239e545c4ab4 is 50, key is test_row_0/C:col10/1732130837566/Put/seqid=0 2024-11-20T19:27:18,476 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/A of ddf3bf4b0d5353d829b30f0de5c7c11a into 86de3260331e49dea3d245a999d49c5e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:18,476 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:18,476 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/A, priority=13, startTime=1732130838393; duration=0sec 2024-11-20T19:27:18,476 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:18,476 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:A 2024-11-20T19:27:18,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742372_1548 (size=12697) 2024-11-20T19:27:18,499 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/846e2e31566e4f278dd1239e545c4ab4 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/846e2e31566e4f278dd1239e545c4ab4 2024-11-20T19:27:18,506 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/C of ddf3bf4b0d5353d829b30f0de5c7c11a into 846e2e31566e4f278dd1239e545c4ab4(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:18,507 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:18,507 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/C, priority=13, startTime=1732130838394; duration=0sec 2024-11-20T19:27:18,507 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:18,507 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:C 2024-11-20T19:27:18,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742371_1547 (size=12151) 2024-11-20T19:27:18,518 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/13b7ef3a59b942c3846f9cc867fd1e69 2024-11-20T19:27:18,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/43f0cc7f93bd4f3d86adb3da57109494 is 50, key is test_row_0/B:col10/1732130837937/Put/seqid=0 2024-11-20T19:27:18,591 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:18,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:18,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742373_1549 (size=12151) 2024-11-20T19:27:18,593 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/43f0cc7f93bd4f3d86adb3da57109494 2024-11-20T19:27:18,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/3151875afb704053b06a9e66299e607d is 50, key is test_row_0/C:col10/1732130837937/Put/seqid=0 2024-11-20T19:27:18,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130898604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:18,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130898604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:18,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130898604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:18,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130898605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:18,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742374_1550 (size=12151) 2024-11-20T19:27:18,654 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/3151875afb704053b06a9e66299e607d 2024-11-20T19:27:18,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/13b7ef3a59b942c3846f9cc867fd1e69 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/13b7ef3a59b942c3846f9cc867fd1e69 2024-11-20T19:27:18,667 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/13b7ef3a59b942c3846f9cc867fd1e69, entries=150, sequenceid=249, filesize=11.9 K 2024-11-20T19:27:18,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/43f0cc7f93bd4f3d86adb3da57109494 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/43f0cc7f93bd4f3d86adb3da57109494 2024-11-20T19:27:18,674 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 
{event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/43f0cc7f93bd4f3d86adb3da57109494, entries=150, sequenceid=249, filesize=11.9 K 2024-11-20T19:27:18,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/3151875afb704053b06a9e66299e607d as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/3151875afb704053b06a9e66299e607d 2024-11-20T19:27:18,679 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/3151875afb704053b06a9e66299e607d, entries=150, sequenceid=249, filesize=11.9 K 2024-11-20T19:27:18,680 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for ddf3bf4b0d5353d829b30f0de5c7c11a in 227ms, sequenceid=249, compaction requested=false 2024-11-20T19:27:18,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:18,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
2024-11-20T19:27:18,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-11-20T19:27:18,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-11-20T19:27:18,683 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-11-20T19:27:18,683 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 848 msec 2024-11-20T19:27:18,684 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 852 msec 2024-11-20T19:27:18,715 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T19:27:18,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:18,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:18,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:18,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:18,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:18,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:18,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:18,719 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/7bcc156b7aa44737980c6c366637518f is 50, key is test_row_0/A:col10/1732130838601/Put/seqid=0 2024-11-20T19:27:18,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742375_1551 (size=14691) 2024-11-20T19:27:18,740 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/7bcc156b7aa44737980c6c366637518f 2024-11-20T19:27:18,756 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/da1c2f37934448528a9c17452a1fe692 is 50, key is test_row_0/B:col10/1732130838601/Put/seqid=0 2024-11-20T19:27:18,785 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore 
size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130898772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:18,786 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130898773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:18,786 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130898773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:18,794 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130898784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:18,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742376_1552 (size=12301) 2024-11-20T19:27:18,794 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/da1c2f37934448528a9c17452a1fe692 2024-11-20T19:27:18,801 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/7680918979834ce3ad5ba2330f86f21e is 50, key is test_row_0/C:col10/1732130838601/Put/seqid=0 2024-11-20T19:27:18,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742377_1553 (size=12301) 2024-11-20T19:27:18,844 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/7680918979834ce3ad5ba2330f86f21e 2024-11-20T19:27:18,848 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/7bcc156b7aa44737980c6c366637518f as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/7bcc156b7aa44737980c6c366637518f 2024-11-20T19:27:18,856 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/7bcc156b7aa44737980c6c366637518f, 
entries=200, sequenceid=263, filesize=14.3 K 2024-11-20T19:27:18,857 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/da1c2f37934448528a9c17452a1fe692 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/da1c2f37934448528a9c17452a1fe692 2024-11-20T19:27:18,865 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/da1c2f37934448528a9c17452a1fe692, entries=150, sequenceid=263, filesize=12.0 K 2024-11-20T19:27:18,868 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/7680918979834ce3ad5ba2330f86f21e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/7680918979834ce3ad5ba2330f86f21e 2024-11-20T19:27:18,875 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/7680918979834ce3ad5ba2330f86f21e, entries=150, sequenceid=263, filesize=12.0 K 2024-11-20T19:27:18,876 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for ddf3bf4b0d5353d829b30f0de5c7c11a in 161ms, sequenceid=263, compaction requested=true 2024-11-20T19:27:18,876 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:18,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:18,876 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:18,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:18,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:18,876 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:18,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:18,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:18,876 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:18,877 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39539 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:18,877 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:18,877 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/B is initiating minor compaction (all files) 2024-11-20T19:27:18,877 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/A is initiating minor compaction (all files) 2024-11-20T19:27:18,877 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/A in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:18,877 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/B in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:18,877 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/c25d3f509f124f4bb6b013b2bf6b8942, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/43f0cc7f93bd4f3d86adb3da57109494, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/da1c2f37934448528a9c17452a1fe692] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=36.3 K 2024-11-20T19:27:18,877 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/86de3260331e49dea3d245a999d49c5e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/13b7ef3a59b942c3846f9cc867fd1e69, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/7bcc156b7aa44737980c6c366637518f] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=38.6 K 2024-11-20T19:27:18,877 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting c25d3f509f124f4bb6b013b2bf6b8942, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1732130837552 
2024-11-20T19:27:18,878 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 86de3260331e49dea3d245a999d49c5e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1732130837552 2024-11-20T19:27:18,878 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 13b7ef3a59b942c3846f9cc867fd1e69, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732130837927 2024-11-20T19:27:18,878 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 43f0cc7f93bd4f3d86adb3da57109494, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732130837927 2024-11-20T19:27:18,878 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting da1c2f37934448528a9c17452a1fe692, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1732130838601 2024-11-20T19:27:18,878 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7bcc156b7aa44737980c6c366637518f, keycount=200, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1732130838566 2024-11-20T19:27:18,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:18,892 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:27:18,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:18,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:18,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:18,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:18,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:18,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:18,897 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#B#compaction#465 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:18,897 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/d51c432d750e406da66213b8eada476b is 50, key is test_row_0/B:col10/1732130838601/Put/seqid=0 2024-11-20T19:27:18,903 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#A#compaction#466 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:18,903 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/1befc85a318e4284af20cfb94215f0c0 is 50, key is test_row_0/A:col10/1732130838601/Put/seqid=0 2024-11-20T19:27:18,918 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130898910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:18,918 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/839caeddc9554df99feb45a81b746173 is 50, key is test_row_0/A:col10/1732130838782/Put/seqid=0 2024-11-20T19:27:18,922 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130898910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:18,922 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130898916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:18,929 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130898918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:18,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T19:27:18,948 INFO [Thread-2203 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-11-20T19:27:18,949 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:18,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-11-20T19:27:18,950 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:18,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T19:27:18,951 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:18,951 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:18,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742378_1554 (size=12949) 2024-11-20T19:27:18,963 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/d51c432d750e406da66213b8eada476b 
as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/d51c432d750e406da66213b8eada476b 2024-11-20T19:27:18,972 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/B of ddf3bf4b0d5353d829b30f0de5c7c11a into d51c432d750e406da66213b8eada476b(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:18,972 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:18,972 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/B, priority=13, startTime=1732130838876; duration=0sec 2024-11-20T19:27:18,972 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:18,972 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:B 2024-11-20T19:27:18,972 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:18,973 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:18,973 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/C is initiating minor compaction (all files) 2024-11-20T19:27:18,973 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/C in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
2024-11-20T19:27:18,973 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/846e2e31566e4f278dd1239e545c4ab4, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/3151875afb704053b06a9e66299e607d, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/7680918979834ce3ad5ba2330f86f21e] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=36.3 K 2024-11-20T19:27:18,974 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 846e2e31566e4f278dd1239e545c4ab4, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1732130837552 2024-11-20T19:27:18,974 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 3151875afb704053b06a9e66299e607d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732130837927 2024-11-20T19:27:18,974 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 7680918979834ce3ad5ba2330f86f21e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1732130838601 2024-11-20T19:27:18,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742379_1555 (size=12949) 2024-11-20T19:27:18,984 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/1befc85a318e4284af20cfb94215f0c0 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/1befc85a318e4284af20cfb94215f0c0 2024-11-20T19:27:18,988 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/A of ddf3bf4b0d5353d829b30f0de5c7c11a into 1befc85a318e4284af20cfb94215f0c0(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:18,988 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:18,988 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/A, priority=13, startTime=1732130838876; duration=0sec 2024-11-20T19:27:18,988 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:18,988 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:A 2024-11-20T19:27:18,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742380_1556 (size=14741) 2024-11-20T19:27:18,993 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#C#compaction#468 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:18,994 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/7bdb34d0c05240f487af3de5da77925e is 50, key is test_row_0/C:col10/1732130838601/Put/seqid=0 2024-11-20T19:27:19,025 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130899019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:19,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130899023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:19,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130899023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:19,036 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130899030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:19,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742381_1557 (size=12949) 2024-11-20T19:27:19,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T19:27:19,102 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:19,102 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T19:27:19,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:19,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:19,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:19,103 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:19,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:19,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:19,230 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130899226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:19,233 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130899229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:19,233 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130899229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:19,244 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130899238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:19,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T19:27:19,255 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:19,255 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T19:27:19,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:19,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:19,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:19,255 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:19,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:19,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:19,394 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/839caeddc9554df99feb45a81b746173 2024-11-20T19:27:19,400 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/aa9c5173269a498d89ce9a567005dbfb is 50, key is test_row_0/B:col10/1732130838782/Put/seqid=0 2024-11-20T19:27:19,407 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:19,408 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T19:27:19,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:19,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:19,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:19,408 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:19,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:19,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:19,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742382_1558 (size=12301) 2024-11-20T19:27:19,447 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/aa9c5173269a498d89ce9a567005dbfb 2024-11-20T19:27:19,459 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/7bdb34d0c05240f487af3de5da77925e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/7bdb34d0c05240f487af3de5da77925e 2024-11-20T19:27:19,465 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/C of ddf3bf4b0d5353d829b30f0de5c7c11a into 7bdb34d0c05240f487af3de5da77925e(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:19,465 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:19,465 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/C, priority=13, startTime=1732130838876; duration=0sec 2024-11-20T19:27:19,465 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:19,465 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:C 2024-11-20T19:27:19,468 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/bf4ffd2b955f46088cd596fcbd9fe910 is 50, key is test_row_0/C:col10/1732130838782/Put/seqid=0 2024-11-20T19:27:19,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742383_1559 (size=12301) 2024-11-20T19:27:19,537 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130899532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:19,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130899536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:19,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130899536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:19,550 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130899546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:19,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T19:27:19,560 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:19,561 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T19:27:19,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:19,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:19,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:19,561 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:19,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:19,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:19,713 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:19,714 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T19:27:19,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:19,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:19,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:19,714 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:19,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:19,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:19,865 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:19,865 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T19:27:19,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
2024-11-20T19:27:19,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:19,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:19,866 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:19,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:19,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:19,901 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/bf4ffd2b955f46088cd596fcbd9fe910 2024-11-20T19:27:19,903 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/839caeddc9554df99feb45a81b746173 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/839caeddc9554df99feb45a81b746173 2024-11-20T19:27:19,908 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/839caeddc9554df99feb45a81b746173, entries=200, sequenceid=288, filesize=14.4 K 2024-11-20T19:27:19,909 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/aa9c5173269a498d89ce9a567005dbfb as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/aa9c5173269a498d89ce9a567005dbfb 2024-11-20T19:27:19,921 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59050 deadline: 1732130899917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:19,922 DEBUG [Thread-2199 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8254 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., hostname=db9c3a6c6492,41229,1732130701496, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:27:19,924 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/aa9c5173269a498d89ce9a567005dbfb, entries=150, sequenceid=288, filesize=12.0 K 2024-11-20T19:27:19,926 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/bf4ffd2b955f46088cd596fcbd9fe910 as 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/bf4ffd2b955f46088cd596fcbd9fe910 2024-11-20T19:27:19,944 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/bf4ffd2b955f46088cd596fcbd9fe910, entries=150, sequenceid=288, filesize=12.0 K 2024-11-20T19:27:19,945 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for ddf3bf4b0d5353d829b30f0de5c7c11a in 1053ms, sequenceid=288, compaction requested=false 2024-11-20T19:27:19,945 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:20,017 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:20,018 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T19:27:20,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:20,018 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:27:20,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:20,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:20,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:20,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:20,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:20,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:20,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/357bf079291b44648244206f1254f9c4 is 50, key is test_row_0/A:col10/1732130838917/Put/seqid=0 2024-11-20T19:27:20,032 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742384_1560 (size=12301) 2024-11-20T19:27:20,032 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=302 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/357bf079291b44648244206f1254f9c4 2024-11-20T19:27:20,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/f3b9966d7f75432598c2a8198b9e9b3e is 50, key is test_row_0/B:col10/1732130838917/Put/seqid=0 2024-11-20T19:27:20,042 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:20,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:20,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T19:27:20,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742385_1561 (size=12301) 2024-11-20T19:27:20,078 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=302 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/f3b9966d7f75432598c2a8198b9e9b3e 2024-11-20T19:27:20,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/e0a8054621424ac893864f9acefa9a74 is 50, key is test_row_0/C:col10/1732130838917/Put/seqid=0 2024-11-20T19:27:20,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130900088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:20,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130900094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:20,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742386_1562 (size=12301) 2024-11-20T19:27:20,109 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130900099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:20,109 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130900100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:20,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130900201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:20,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130900201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:20,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130900211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:20,214 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130900211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:20,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130900407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:20,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130900407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:20,421 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130900415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:20,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130900418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:20,508 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=302 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/e0a8054621424ac893864f9acefa9a74 2024-11-20T19:27:20,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/357bf079291b44648244206f1254f9c4 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/357bf079291b44648244206f1254f9c4 2024-11-20T19:27:20,516 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/357bf079291b44648244206f1254f9c4, entries=150, sequenceid=302, filesize=12.0 K 2024-11-20T19:27:20,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/f3b9966d7f75432598c2a8198b9e9b3e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/f3b9966d7f75432598c2a8198b9e9b3e 2024-11-20T19:27:20,520 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/f3b9966d7f75432598c2a8198b9e9b3e, entries=150, sequenceid=302, filesize=12.0 K 2024-11-20T19:27:20,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/e0a8054621424ac893864f9acefa9a74 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/e0a8054621424ac893864f9acefa9a74 2024-11-20T19:27:20,526 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/e0a8054621424ac893864f9acefa9a74, entries=150, sequenceid=302, filesize=12.0 K 2024-11-20T19:27:20,527 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for ddf3bf4b0d5353d829b30f0de5c7c11a in 509ms, sequenceid=302, compaction requested=true 2024-11-20T19:27:20,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:20,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:20,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-11-20T19:27:20,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-11-20T19:27:20,529 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-11-20T19:27:20,529 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5770 sec 2024-11-20T19:27:20,530 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 1.5800 sec 2024-11-20T19:27:20,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:20,722 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T19:27:20,722 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:20,722 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:20,722 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:20,722 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:20,722 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:20,722 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:20,744 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/d104ff4b99d84c54bb70a1c556b5f813 is 50, key is test_row_0/A:col10/1732130840093/Put/seqid=0 2024-11-20T19:27:20,752 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,752 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130900736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:20,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130900737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:20,752 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130900745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:20,753 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130900746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:20,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742387_1563 (size=14741) 2024-11-20T19:27:20,855 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130900853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:20,856 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130900853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:20,856 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130900853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:20,856 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130900854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:21,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T19:27:21,055 INFO [Thread-2203 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-11-20T19:27:21,056 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:21,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-11-20T19:27:21,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T19:27:21,058 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:21,059 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:21,059 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:21,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:21,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130901057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:21,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:21,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130901057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:21,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:21,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130901058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:21,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:21,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130901058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:21,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T19:27:21,195 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/d104ff4b99d84c54bb70a1c556b5f813 2024-11-20T19:27:21,204 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/7967bb0d1c5345a4bbe504e28de316a2 is 50, key is test_row_0/B:col10/1732130840093/Put/seqid=0 2024-11-20T19:27:21,210 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:21,211 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T19:27:21,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:21,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:21,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:21,211 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:21,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:21,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:21,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742388_1564 (size=12301) 2024-11-20T19:27:21,249 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/7967bb0d1c5345a4bbe504e28de316a2 2024-11-20T19:27:21,268 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/80b92d1ef727448eb9a1f37867513375 is 50, key is test_row_0/C:col10/1732130840093/Put/seqid=0 2024-11-20T19:27:21,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742389_1565 (size=12301) 2024-11-20T19:27:21,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T19:27:21,363 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:21,364 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T19:27:21,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
2024-11-20T19:27:21,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:21,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:21,366 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:21,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
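[Editor's note] The repeated "NOT flushing ... as already flushing" followed by "java.io.IOException: Unable to complete flush" shows the flush callable refusing to start while a previous flush is still running, which the master then sees as a failed remote procedure. The following is a minimal, self-contained plain-Java sketch of that pattern; the class and method names are hypothetical and it is not the HBase implementation.

```java
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical illustration of the pattern in the log above: a remote flush
// request is rejected with an IOException while the region is still busy with
// a previous flush, so the master has to retry the procedure later.
public class FlushSkipSketch {
    private final AtomicBoolean flushing = new AtomicBoolean(false);

    // Called by the remote-procedure handler; throws when a flush is in progress.
    void callFlush(String regionName) throws IOException {
        if (!flushing.compareAndSet(false, true)) {
            // Mirrors "NOT flushing ... as already flushing" followed by
            // "java.io.IOException: Unable to complete flush {...}".
            throw new IOException("Unable to complete flush " + regionName);
        }
        try {
            // ... write memstore snapshots to temporary store files here ...
        } finally {
            flushing.set(false);
        }
    }

    public static void main(String[] args) {
        FlushSkipSketch sketch = new FlushSkipSketch();
        sketch.flushing.set(true);                 // simulate an ongoing flush
        try {
            sketch.callFlush("ddf3bf4b0d5353d829b30f0de5c7c11a");
        } catch (IOException e) {
            System.out.println("remote procedure failed: " + e.getMessage());
        }
    }
}
```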
2024-11-20T19:27:21,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:21,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:21,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130901367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:21,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:21,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130901367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:21,374 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:21,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130901368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:21,374 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:21,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130901370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:21,519 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:21,519 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T19:27:21,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:21,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:21,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:21,520 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
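[Editor's note] The RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are the server rejecting writes until the memstore drains. A client typically retries such calls with backoff. Below is a hedged, standalone sketch of that retry idea; the exception class, `Mutation` interface, and `putWithRetry` helper are illustrative names, not the HBase client API.

```java
import java.io.IOException;
import java.util.concurrent.ThreadLocalRandom;

// Hypothetical client-side sketch: retry a mutation with capped exponential
// backoff when the server rejects it because the memstore is over its limit.
public class BusyRegionRetrySketch {
    static class RegionTooBusy extends IOException {
        RegionTooBusy(String msg) { super(msg); }
    }

    interface Mutation { void apply() throws IOException; }

    static void putWithRetry(Mutation put, int maxAttempts) throws IOException, InterruptedException {
        long backoffMs = 100;
        for (int attempt = 1; ; attempt++) {
            try {
                put.apply();
                return;
            } catch (RegionTooBusy e) {
                if (attempt >= maxAttempts) throw e;        // give up after maxAttempts
                Thread.sleep(backoffMs + ThreadLocalRandom.current().nextLong(50));
                backoffMs = Math.min(backoffMs * 2, 5_000); // cap the backoff
            }
        }
    }

    public static void main(String[] args) throws Exception {
        int[] calls = {0};
        putWithRetry(() -> {
            if (++calls[0] < 3) throw new RegionTooBusy("Over memstore limit=512.0 K");
            System.out.println("mutation accepted on attempt " + calls[0]);
        }, 10);
    }
}
```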
2024-11-20T19:27:21,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:21,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:21,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T19:27:21,673 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:21,673 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T19:27:21,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:21,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:21,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:21,673 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:21,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:21,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
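[Editor's note] The master keeps re-dispatching pid=135 to the region server (RSProcedureDispatcher entries at 21,364, 21,519, 21,673) until the flush can actually run. The sketch below shows that retry-until-done loop in plain Java under stated assumptions; it is an illustration of the dispatch pattern, not the procedure-v2 framework.

```java
import java.io.IOException;
import java.util.concurrent.TimeUnit;

// Illustrative sketch: the coordinator re-dispatches the remote flush call
// until the region server no longer reports failure, which is the loop that
// produces the repeated "Remote procedure failed, pid=135" entries above.
public class RedispatchSketch {
    interface RemoteCall { boolean run() throws IOException; }

    static void dispatchUntilDone(RemoteCall flushProcedure) throws InterruptedException {
        while (true) {
            try {
                if (flushProcedure.run()) {
                    return;                        // region server completed the flush
                }
            } catch (IOException reported) {
                // the region server reported failure; fall through and retry
            }
            TimeUnit.MILLISECONDS.sleep(150);      // give the ongoing flush time to finish
        }
    }

    public static void main(String[] args) throws InterruptedException {
        int[] attempts = {0};
        dispatchUntilDone(() -> {
            if (++attempts[0] < 4) throw new IOException("Unable to complete flush");
            return true;
        });
        System.out.println("procedure finished after " + attempts[0] + " dispatches");
    }
}
```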
2024-11-20T19:27:21,712 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/80b92d1ef727448eb9a1f37867513375 2024-11-20T19:27:21,719 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/d104ff4b99d84c54bb70a1c556b5f813 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/d104ff4b99d84c54bb70a1c556b5f813 2024-11-20T19:27:21,724 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/d104ff4b99d84c54bb70a1c556b5f813, entries=200, sequenceid=328, filesize=14.4 K 2024-11-20T19:27:21,725 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/7967bb0d1c5345a4bbe504e28de316a2 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/7967bb0d1c5345a4bbe504e28de316a2 2024-11-20T19:27:21,730 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/7967bb0d1c5345a4bbe504e28de316a2, entries=150, sequenceid=328, filesize=12.0 K 2024-11-20T19:27:21,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/80b92d1ef727448eb9a1f37867513375 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/80b92d1ef727448eb9a1f37867513375 2024-11-20T19:27:21,734 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/80b92d1ef727448eb9a1f37867513375, entries=150, sequenceid=328, filesize=12.0 K 2024-11-20T19:27:21,735 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for ddf3bf4b0d5353d829b30f0de5c7c11a in 1013ms, sequenceid=328, compaction requested=true 2024-11-20T19:27:21,735 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:21,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:21,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:21,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:21,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T19:27:21,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:21,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-20T19:27:21,735 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:27:21,736 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:27:21,737 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:27:21,737 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/C is initiating minor compaction (all files) 2024-11-20T19:27:21,737 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/C in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
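[Editor's note] The "Exploring compaction algorithm has selected 4 files of size 49852 ... with 3 in ratio" entries reflect ratio-based store-file selection. Below is a simplified, hypothetical sketch of that idea: a candidate window qualifies when every file is no larger than `ratio` times the combined size of the other files in the window. The real ExploringCompactionPolicy applies more constraints (min/max file counts, off-peak ratios, choosing the best of all candidate windows), so treat this only as an illustration.

```java
import java.util.ArrayList;
import java.util.List;

// Simplified ratio-based selection sketch (not HBase's ExploringCompactionPolicy).
public class RatioSelectionSketch {
    static List<Long> selectForCompaction(List<Long> fileSizes, int minFiles, double ratio) {
        List<Long> best = new ArrayList<>();
        for (int start = 0; start < fileSizes.size(); start++) {
            for (int end = start + minFiles; end <= fileSizes.size(); end++) {
                List<Long> window = fileSizes.subList(start, end);
                long total = window.stream().mapToLong(Long::longValue).sum();
                boolean inRatio = window.stream().allMatch(s -> s <= ratio * (total - s));
                if (inRatio && window.size() > best.size()) {
                    best = new ArrayList<>(window);    // prefer the widest qualifying window
                }
            }
        }
        return best;
    }

    public static void main(String[] args) {
        // Roughly the four C-family store files from the log (total 49852 bytes).
        List<Long> sizes = List.of(12_949L, 12_301L, 12_301L, 12_301L);
        System.out.println("selected: " + selectForCompaction(sizes, 3, 1.2));
    }
}
```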
2024-11-20T19:27:21,737 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/7bdb34d0c05240f487af3de5da77925e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/bf4ffd2b955f46088cd596fcbd9fe910, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/e0a8054621424ac893864f9acefa9a74, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/80b92d1ef727448eb9a1f37867513375] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=48.7 K 2024-11-20T19:27:21,738 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54732 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:27:21,738 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/A is initiating minor compaction (all files) 2024-11-20T19:27:21,738 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/A in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:21,738 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/1befc85a318e4284af20cfb94215f0c0, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/839caeddc9554df99feb45a81b746173, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/357bf079291b44648244206f1254f9c4, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/d104ff4b99d84c54bb70a1c556b5f813] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=53.4 K 2024-11-20T19:27:21,738 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 7bdb34d0c05240f487af3de5da77925e, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1732130838601 2024-11-20T19:27:21,739 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1befc85a318e4284af20cfb94215f0c0, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1732130838601 2024-11-20T19:27:21,739 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting bf4ffd2b955f46088cd596fcbd9fe910, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, 
earliestPutTs=1732130838771 2024-11-20T19:27:21,739 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 839caeddc9554df99feb45a81b746173, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732130838771 2024-11-20T19:27:21,739 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 357bf079291b44648244206f1254f9c4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1732130838909 2024-11-20T19:27:21,739 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting e0a8054621424ac893864f9acefa9a74, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1732130838909 2024-11-20T19:27:21,740 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting d104ff4b99d84c54bb70a1c556b5f813, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1732130840083 2024-11-20T19:27:21,740 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 80b92d1ef727448eb9a1f37867513375, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1732130840093 2024-11-20T19:27:21,758 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#C#compaction#477 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:21,759 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/8ee310ad2f3e4f348f3692d69a04c552 is 50, key is test_row_0/C:col10/1732130840093/Put/seqid=0 2024-11-20T19:27:21,773 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#A#compaction#478 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:21,773 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/94c58e15b9a64706af81bb47e69ee24e is 50, key is test_row_0/A:col10/1732130840093/Put/seqid=0 2024-11-20T19:27:21,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742390_1566 (size=13085) 2024-11-20T19:27:21,794 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/8ee310ad2f3e4f348f3692d69a04c552 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/8ee310ad2f3e4f348f3692d69a04c552 2024-11-20T19:27:21,799 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/C of ddf3bf4b0d5353d829b30f0de5c7c11a into 8ee310ad2f3e4f348f3692d69a04c552(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:21,799 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:21,799 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/C, priority=12, startTime=1732130841735; duration=0sec 2024-11-20T19:27:21,800 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:21,800 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:C 2024-11-20T19:27:21,800 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:27:21,802 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:27:21,802 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/B is initiating minor compaction (all files) 2024-11-20T19:27:21,803 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/B in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
2024-11-20T19:27:21,803 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/d51c432d750e406da66213b8eada476b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/aa9c5173269a498d89ce9a567005dbfb, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/f3b9966d7f75432598c2a8198b9e9b3e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/7967bb0d1c5345a4bbe504e28de316a2] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=48.7 K 2024-11-20T19:27:21,803 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting d51c432d750e406da66213b8eada476b, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1732130838601 2024-11-20T19:27:21,804 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting aa9c5173269a498d89ce9a567005dbfb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732130838771 2024-11-20T19:27:21,805 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting f3b9966d7f75432598c2a8198b9e9b3e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1732130838909 2024-11-20T19:27:21,806 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 7967bb0d1c5345a4bbe504e28de316a2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1732130840093 2024-11-20T19:27:21,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742391_1567 (size=13085) 2024-11-20T19:27:21,821 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#B#compaction#479 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:21,821 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/b5991448caa44b6681ea1969ca9e1e94 is 50, key is test_row_0/B:col10/1732130840093/Put/seqid=0 2024-11-20T19:27:21,825 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:21,826 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T19:27:21,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:21,826 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T19:27:21,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:21,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:21,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:21,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:21,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:21,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:21,830 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/94c58e15b9a64706af81bb47e69ee24e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/94c58e15b9a64706af81bb47e69ee24e 2024-11-20T19:27:21,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/f1d09436093f467bb175402e4d2ba3bf is 50, key is test_row_0/A:col10/1732130840744/Put/seqid=0 
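[Editor's note] The PressureAwareThroughputController entries ("average throughput is 6.55 MB/second, slept 0 time(s) ... total limit is 50.00 MB/second") show compaction writes being rate-limited. The following plain-Java sketch captures the underlying idea under stated assumptions: track bytes written and sleep whenever the observed rate would exceed the configured limit. It is not the HBase controller.

```java
// Hedged throughput-throttling sketch: pause the writer when it runs ahead of
// the configured bytes-per-second limit.
public class ThroughputThrottleSketch {
    private final double limitBytesPerSec;
    private final long start = System.nanoTime();
    private long bytes;

    ThroughputThrottleSketch(double limitBytesPerSec) { this.limitBytesPerSec = limitBytesPerSec; }

    // Called after each chunk of compaction output; returns the time slept in ms.
    long control(long bytesWritten) throws InterruptedException {
        bytes += bytesWritten;
        double elapsedSec = (System.nanoTime() - start) / 1e9;
        double minSecForBytes = bytes / limitBytesPerSec;   // time the bytes *should* take
        long sleepMs = (long) ((minSecForBytes - elapsedSec) * 1000);
        if (sleepMs > 0) {
            Thread.sleep(sleepMs);                          // fast writer: pause to stay under the limit
        }
        return Math.max(sleepMs, 0);
    }

    public static void main(String[] args) throws InterruptedException {
        ThroughputThrottleSketch t = new ThroughputThrottleSketch(50 * 1024 * 1024); // 50 MB/s
        long slept = t.control(64L * 1024 * 1024);          // write 64 MB "instantly"
        System.out.println("slept " + slept + " ms to respect the limit");
    }
}
```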
2024-11-20T19:27:21,841 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/A of ddf3bf4b0d5353d829b30f0de5c7c11a into 94c58e15b9a64706af81bb47e69ee24e(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:21,841 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:21,841 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/A, priority=12, startTime=1732130841735; duration=0sec 2024-11-20T19:27:21,841 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:21,841 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:A 2024-11-20T19:27:21,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742393_1569 (size=9857) 2024-11-20T19:27:21,851 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=339 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/f1d09436093f467bb175402e4d2ba3bf 2024-11-20T19:27:21,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742392_1568 (size=13085) 2024-11-20T19:27:21,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/9cae62bfa0dd4ef8978e68103d7cd787 is 50, key is test_row_0/B:col10/1732130840744/Put/seqid=0 2024-11-20T19:27:21,869 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/b5991448caa44b6681ea1969ca9e1e94 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/b5991448caa44b6681ea1969ca9e1e94 2024-11-20T19:27:21,877 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/B of ddf3bf4b0d5353d829b30f0de5c7c11a into b5991448caa44b6681ea1969ca9e1e94(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
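[Editor's note] The "Committing .../.tmp/A/... as .../A/..." entries illustrate the write-to-temp-then-commit pattern: the flusher and compactor write new store files under a `.tmp` directory and only move them into the store directory once complete, so readers never observe a partial file. The sketch below demonstrates the pattern with `java.nio` on a local filesystem purely for illustration; HBase does the equivalent through HRegionFileSystem on HDFS, and the file name used here is just copied from the log.

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Illustrative temp-then-commit sketch (local filesystem stand-in for HDFS).
public class TmpCommitSketch {
    static Path flushAndCommit(Path storeDir, String fileName, byte[] contents) throws IOException {
        Path tmpDir = storeDir.resolve(".tmp");
        Files.createDirectories(tmpDir);
        Path tmpFile = tmpDir.resolve(fileName);
        Files.write(tmpFile, contents);                                  // 1. write the full file in .tmp
        Path committed = storeDir.resolve(fileName);
        Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);  // 2. commit with a rename
        return committed;
    }

    public static void main(String[] args) throws IOException {
        Path storeDir = Files.createTempDirectory("store-A");
        Path f = flushAndCommit(storeDir, "d104ff4b99d84c54bb70a1c556b5f813", new byte[]{1, 2, 3});
        System.out.println("committed " + f + " (" + Files.size(f) + " bytes)");
    }
}
```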
2024-11-20T19:27:21,877 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:21,877 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/B, priority=12, startTime=1732130841735; duration=0sec 2024-11-20T19:27:21,877 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:21,877 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:B 2024-11-20T19:27:21,878 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:21,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:21,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742394_1570 (size=9857) 2024-11-20T19:27:21,895 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=339 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/9cae62bfa0dd4ef8978e68103d7cd787 2024-11-20T19:27:21,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/5c51558a7e8a4dad847f0f941260e6c2 is 50, key is test_row_0/C:col10/1732130840744/Put/seqid=0 2024-11-20T19:27:21,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742395_1571 (size=9857) 2024-11-20T19:27:21,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:21,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130901914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:21,922 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:21,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130901918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:21,928 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:21,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130901921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:21,929 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:21,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130901922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:22,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130902022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:22,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130902023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:22,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130902030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:22,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130902030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:22,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T19:27:22,230 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,231 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130902228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:22,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130902229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:22,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130902232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:22,240 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130902232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:22,322 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=339 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/5c51558a7e8a4dad847f0f941260e6c2 2024-11-20T19:27:22,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/f1d09436093f467bb175402e4d2ba3bf as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/f1d09436093f467bb175402e4d2ba3bf 2024-11-20T19:27:22,328 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/f1d09436093f467bb175402e4d2ba3bf, entries=100, sequenceid=339, filesize=9.6 K 2024-11-20T19:27:22,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/9cae62bfa0dd4ef8978e68103d7cd787 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/9cae62bfa0dd4ef8978e68103d7cd787 2024-11-20T19:27:22,332 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/9cae62bfa0dd4ef8978e68103d7cd787, entries=100, sequenceid=339, filesize=9.6 K 2024-11-20T19:27:22,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/5c51558a7e8a4dad847f0f941260e6c2 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/5c51558a7e8a4dad847f0f941260e6c2 2024-11-20T19:27:22,336 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/5c51558a7e8a4dad847f0f941260e6c2, entries=100, sequenceid=339, filesize=9.6 K 2024-11-20T19:27:22,337 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for ddf3bf4b0d5353d829b30f0de5c7c11a in 511ms, sequenceid=339, compaction requested=false 2024-11-20T19:27:22,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:22,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:22,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-11-20T19:27:22,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-11-20T19:27:22,340 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-11-20T19:27:22,340 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2790 sec 2024-11-20T19:27:22,341 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 1.2840 sec 2024-11-20T19:27:22,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:22,538 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T19:27:22,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:22,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:22,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:22,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:22,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:22,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:22,545 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/419fe85b2121483bbb25e7cc63286718 is 50, key is test_row_0/A:col10/1732130841919/Put/seqid=0 2024-11-20T19:27:22,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742396_1572 (size=14741) 2024-11-20T19:27:22,559 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/419fe85b2121483bbb25e7cc63286718 2024-11-20T19:27:22,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130902549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:22,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130902549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:22,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130902550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:22,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130902549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:22,592 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/cea672ae52e84d5998905614404d0b06 is 50, key is test_row_0/B:col10/1732130841919/Put/seqid=0 2024-11-20T19:27:22,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742397_1573 (size=12301) 2024-11-20T19:27:22,665 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130902661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:22,669 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130902662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:22,669 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130902663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:22,670 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130902663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:22,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130902866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:22,873 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130902870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:22,874 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130902870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:22,874 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130902871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:23,046 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/cea672ae52e84d5998905614404d0b06 2024-11-20T19:27:23,051 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/e58d7974bf33424987ac7db610263a7c is 50, key is test_row_0/C:col10/1732130841919/Put/seqid=0 2024-11-20T19:27:23,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742398_1574 (size=12301) 2024-11-20T19:27:23,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T19:27:23,163 INFO [Thread-2203 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-11-20T19:27:23,164 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:23,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-11-20T19:27:23,166 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:23,166 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute 
state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:23,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T19:27:23,166 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:23,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:23,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130903171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:23,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:23,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130903175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:23,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:23,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130903175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:23,179 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:23,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130903175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:23,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T19:27:23,323 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:23,323 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T19:27:23,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:23,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:23,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:23,323 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:23,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:23,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:23,458 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/e58d7974bf33424987ac7db610263a7c 2024-11-20T19:27:23,462 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/419fe85b2121483bbb25e7cc63286718 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/419fe85b2121483bbb25e7cc63286718 2024-11-20T19:27:23,464 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/419fe85b2121483bbb25e7cc63286718, entries=200, sequenceid=368, filesize=14.4 K 2024-11-20T19:27:23,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/cea672ae52e84d5998905614404d0b06 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/cea672ae52e84d5998905614404d0b06 2024-11-20T19:27:23,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T19:27:23,468 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/cea672ae52e84d5998905614404d0b06, entries=150, sequenceid=368, filesize=12.0 K 2024-11-20T19:27:23,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/e58d7974bf33424987ac7db610263a7c as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/e58d7974bf33424987ac7db610263a7c 2024-11-20T19:27:23,471 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/e58d7974bf33424987ac7db610263a7c, entries=150, sequenceid=368, filesize=12.0 K 2024-11-20T19:27:23,472 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-20T19:27:23,472 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for ddf3bf4b0d5353d829b30f0de5c7c11a in 934ms, sequenceid=368, compaction requested=true 2024-11-20T19:27:23,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:23,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:23,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:23,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:23,472 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:23,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:23,472 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:23,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:23,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:23,473 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35243 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:23,473 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/B is initiating minor compaction (all files) 2024-11-20T19:27:23,473 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/B in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
2024-11-20T19:27:23,473 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/b5991448caa44b6681ea1969ca9e1e94, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/9cae62bfa0dd4ef8978e68103d7cd787, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/cea672ae52e84d5998905614404d0b06] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=34.4 K 2024-11-20T19:27:23,474 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37683 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:23,474 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/A is initiating minor compaction (all files) 2024-11-20T19:27:23,474 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/A in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:23,474 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/94c58e15b9a64706af81bb47e69ee24e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/f1d09436093f467bb175402e4d2ba3bf, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/419fe85b2121483bbb25e7cc63286718] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=36.8 K 2024-11-20T19:27:23,474 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting b5991448caa44b6681ea1969ca9e1e94, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1732130840093 2024-11-20T19:27:23,474 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 94c58e15b9a64706af81bb47e69ee24e, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1732130840093 2024-11-20T19:27:23,474 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 9cae62bfa0dd4ef8978e68103d7cd787, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=339, earliestPutTs=1732130840736 2024-11-20T19:27:23,474 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting f1d09436093f467bb175402e4d2ba3bf, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=339, earliestPutTs=1732130840736 2024-11-20T19:27:23,474 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] 
compactions.Compactor(224): Compacting cea672ae52e84d5998905614404d0b06, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1732130841913 2024-11-20T19:27:23,474 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 419fe85b2121483bbb25e7cc63286718, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1732130841913 2024-11-20T19:27:23,475 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:23,475 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T19:27:23,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:23,476 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-20T19:27:23,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:23,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:23,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:23,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:23,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:23,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:23,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/b767be6318754c309159aba0f1634fc4 is 50, key is test_row_0/A:col10/1732130842548/Put/seqid=0 2024-11-20T19:27:23,481 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#B#compaction#487 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:23,481 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#A#compaction#488 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:23,481 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/eff5dc7d837b49d5aedca4d86eea3c89 is 50, key is test_row_0/B:col10/1732130841919/Put/seqid=0 2024-11-20T19:27:23,481 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/939ef10933514134b408bee8b512bea6 is 50, key is test_row_0/A:col10/1732130841919/Put/seqid=0 2024-11-20T19:27:23,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742399_1575 (size=13187) 2024-11-20T19:27:23,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742400_1576 (size=12301) 2024-11-20T19:27:23,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742401_1577 (size=13187) 2024-11-20T19:27:23,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:23,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:23,725 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:23,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130903715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:23,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:23,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130903718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:23,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:23,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130903718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:23,733 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:23,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130903725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:23,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T19:27:23,832 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:23,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130903826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:23,832 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:23,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130903828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:23,832 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:23,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130903828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:23,842 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:23,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130903833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:23,893 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/b767be6318754c309159aba0f1634fc4 2024-11-20T19:27:23,896 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/eff5dc7d837b49d5aedca4d86eea3c89 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/eff5dc7d837b49d5aedca4d86eea3c89 2024-11-20T19:27:23,896 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/939ef10933514134b408bee8b512bea6 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/939ef10933514134b408bee8b512bea6 2024-11-20T19:27:23,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/920421403f8942758a390ca0311d9d23 is 50, key is 
test_row_0/B:col10/1732130842548/Put/seqid=0 2024-11-20T19:27:23,902 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/B of ddf3bf4b0d5353d829b30f0de5c7c11a into eff5dc7d837b49d5aedca4d86eea3c89(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:23,902 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/A of ddf3bf4b0d5353d829b30f0de5c7c11a into 939ef10933514134b408bee8b512bea6(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:23,902 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:23,902 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:23,902 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/A, priority=13, startTime=1732130843472; duration=0sec 2024-11-20T19:27:23,902 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/B, priority=13, startTime=1732130843472; duration=0sec 2024-11-20T19:27:23,902 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:23,902 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:B 2024-11-20T19:27:23,902 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:23,902 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:A 2024-11-20T19:27:23,903 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:23,903 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35243 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:23,903 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/C is initiating minor compaction (all files) 2024-11-20T19:27:23,903 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/C in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
2024-11-20T19:27:23,904 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/8ee310ad2f3e4f348f3692d69a04c552, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/5c51558a7e8a4dad847f0f941260e6c2, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/e58d7974bf33424987ac7db610263a7c] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=34.4 K 2024-11-20T19:27:23,904 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ee310ad2f3e4f348f3692d69a04c552, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1732130840093 2024-11-20T19:27:23,905 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c51558a7e8a4dad847f0f941260e6c2, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=339, earliestPutTs=1732130840736 2024-11-20T19:27:23,905 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting e58d7974bf33424987ac7db610263a7c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1732130841913 2024-11-20T19:27:23,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742402_1578 (size=12301) 2024-11-20T19:27:23,910 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#C#compaction#490 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:23,910 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/22308a37774f418d8a80b72ad0e5a617 is 50, key is test_row_0/C:col10/1732130841919/Put/seqid=0 2024-11-20T19:27:23,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742403_1579 (size=13187) 2024-11-20T19:27:24,035 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:24,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130904033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:24,037 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:24,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130904034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:24,037 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:24,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130904034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:24,046 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:24,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130904043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:24,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T19:27:24,306 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/920421403f8942758a390ca0311d9d23 2024-11-20T19:27:24,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/7abf532f9b4d47109d5fa82b0ff06db1 is 50, key is test_row_0/C:col10/1732130842548/Put/seqid=0 2024-11-20T19:27:24,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742404_1580 (size=12301) 2024-11-20T19:27:24,317 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/22308a37774f418d8a80b72ad0e5a617 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/22308a37774f418d8a80b72ad0e5a617 2024-11-20T19:27:24,320 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/C of ddf3bf4b0d5353d829b30f0de5c7c11a into 22308a37774f418d8a80b72ad0e5a617(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:24,320 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:24,321 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/C, priority=13, startTime=1732130843472; duration=0sec 2024-11-20T19:27:24,321 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:24,321 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:C 2024-11-20T19:27:24,340 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:24,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130904338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:24,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:24,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130904339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:24,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:24,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130904339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:24,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:24,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130904347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:24,715 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/7abf532f9b4d47109d5fa82b0ff06db1 2024-11-20T19:27:24,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/b767be6318754c309159aba0f1634fc4 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/b767be6318754c309159aba0f1634fc4 2024-11-20T19:27:24,720 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/b767be6318754c309159aba0f1634fc4, entries=150, sequenceid=377, filesize=12.0 K 2024-11-20T19:27:24,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/920421403f8942758a390ca0311d9d23 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/920421403f8942758a390ca0311d9d23 2024-11-20T19:27:24,723 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/920421403f8942758a390ca0311d9d23, entries=150, sequenceid=377, filesize=12.0 K 2024-11-20T19:27:24,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/7abf532f9b4d47109d5fa82b0ff06db1 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/7abf532f9b4d47109d5fa82b0ff06db1 2024-11-20T19:27:24,726 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/7abf532f9b4d47109d5fa82b0ff06db1, entries=150, sequenceid=377, filesize=12.0 K 2024-11-20T19:27:24,727 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for ddf3bf4b0d5353d829b30f0de5c7c11a in 1252ms, sequenceid=377, compaction requested=false 2024-11-20T19:27:24,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:24,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
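The RegionTooBusyException entries above reject writes at a 512.0 K per-region memstore limit while this flush of ddf3bf4b0d5353d829b30f0de5c7c11a completes. In HBase that blocking threshold is the configured memstore flush size multiplied by the blocking multiplier. The sketch below is not taken from the test source; the class name and the 128 KB / multiplier-4 values are assumptions chosen only because their product matches the 512.0 K limit in the log, while the two configuration keys are standard HBase settings.

    // Hedged sketch: how a 512 KB per-region blocking limit would typically be configured.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed test-style values: 128 KB flush size x multiplier 4 = 512 KB,
        // matching "Over memstore limit=512.0 K" in the entries above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit =
            conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("puts are rejected above ~" + blockingLimit + " bytes per region");
      }
    }

Until the flushes above bring the region's memstore back under that product, incoming mutations are rejected with RegionTooBusyException and the client has to retry them.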
2024-11-20T19:27:24,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-20T19:27:24,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-11-20T19:27:24,729 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-11-20T19:27:24,729 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5620 sec 2024-11-20T19:27:24,729 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 1.5650 sec 2024-11-20T19:27:24,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:24,845 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-20T19:27:24,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:24,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:24,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:24,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:24,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:24,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:24,849 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/b0bf8390f2d54c128a96bb68cd997bab is 50, key is test_row_0/A:col10/1732130843717/Put/seqid=0 2024-11-20T19:27:24,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742405_1581 (size=14741) 2024-11-20T19:27:24,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:24,856 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:24,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130904851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:24,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130904850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:24,860 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:24,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130904853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:24,861 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:24,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130904856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:24,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:24,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130904957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:24,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:24,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130904961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:24,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:24,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130904961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:25,165 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:25,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130905162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:25,169 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:25,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130905166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:25,170 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:25,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130905166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:25,254 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/b0bf8390f2d54c128a96bb68cd997bab 2024-11-20T19:27:25,260 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/38a2efd6c07a4efb9781c8c767123eca is 50, key is test_row_0/B:col10/1732130843717/Put/seqid=0 2024-11-20T19:27:25,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742406_1582 (size=12301) 2024-11-20T19:27:25,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T19:27:25,270 INFO [Thread-2203 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-11-20T19:27:25,271 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:25,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-11-20T19:27:25,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 
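The entries just above show the flush being driven from the client side: the jenkins client asks the master to flush TestAcidGuarantees, the master stores FlushTableProcedure pid=138, and the client keeps polling ("Checking to see if procedure is done"). A minimal client-side equivalent is sketched below; the class name and the connection bootstrap are assumptions, while Admin.flush and the table name correspond to the FLUSH operation recorded in the log.

    // Hedged sketch of the client side of the FLUSH operations recorded above.
    // Admin.flush(TableName) submits a FlushTableProcedure on the master and
    // waits for it to complete (the repeated "Checking to see if procedure is
    // done" lines are that polling).
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }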
2024-11-20T19:27:25,273 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:25,273 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:25,273 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:25,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T19:27:25,424 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:25,424 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T19:27:25,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:25,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:25,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:25,425 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:25,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:25,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:25,469 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:25,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130905466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:25,472 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:25,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130905470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:25,475 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:25,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130905472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:25,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T19:27:25,576 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:25,576 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T19:27:25,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:25,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:25,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:25,577 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:25,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:25,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:25,664 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/38a2efd6c07a4efb9781c8c767123eca 2024-11-20T19:27:25,670 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/873ffeff8ff944c39c94e0822d6edbf5 is 50, key is test_row_0/C:col10/1732130843717/Put/seqid=0 2024-11-20T19:27:25,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742407_1583 (size=12301) 2024-11-20T19:27:25,728 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:25,729 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T19:27:25,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:25,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:25,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:25,729 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:25,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:25,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:25,862 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:25,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130905860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:25,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T19:27:25,880 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:25,881 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T19:27:25,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:25,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:25,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
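RegionTooBusyException extends IOException and is returned to the caller, so the writers in this test keep retrying until a flush frees memstore space. A client-side retry loop would look roughly like the sketch below; the class name, row, column, value, attempt count, and backoff are illustrative only, and depending on client retry settings the exception may also surface wrapped in a RetriesExhausted-style exception rather than directly.

    // Hedged sketch of handling the RegionTooBusyException rejections logged above.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);
              break;                          // accepted once the memstore drains
            } catch (RegionTooBusyException e) {
              Thread.sleep(100L << attempt);  // back off while the region flushes
            }
          }
        }
      }
    }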
2024-11-20T19:27:25,881 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:25,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:25,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:25,975 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:25,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130905972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:25,975 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:25,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130905972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:25,980 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:25,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130905976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:26,033 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:26,033 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T19:27:26,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:26,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:26,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:26,033 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:26,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:26,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:26,073 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/873ffeff8ff944c39c94e0822d6edbf5 2024-11-20T19:27:26,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/b0bf8390f2d54c128a96bb68cd997bab as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/b0bf8390f2d54c128a96bb68cd997bab 2024-11-20T19:27:26,078 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/b0bf8390f2d54c128a96bb68cd997bab, entries=200, sequenceid=408, filesize=14.4 K 2024-11-20T19:27:26,079 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/38a2efd6c07a4efb9781c8c767123eca as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/38a2efd6c07a4efb9781c8c767123eca 2024-11-20T19:27:26,081 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/38a2efd6c07a4efb9781c8c767123eca, entries=150, sequenceid=408, filesize=12.0 K 2024-11-20T19:27:26,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/873ffeff8ff944c39c94e0822d6edbf5 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/873ffeff8ff944c39c94e0822d6edbf5 2024-11-20T19:27:26,084 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/873ffeff8ff944c39c94e0822d6edbf5, entries=150, sequenceid=408, filesize=12.0 K 2024-11-20T19:27:26,084 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for ddf3bf4b0d5353d829b30f0de5c7c11a in 1239ms, sequenceid=408, compaction requested=true 2024-11-20T19:27:26,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:26,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:26,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:26,085 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:26,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:26,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:26,085 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:26,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:26,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:26,085 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:26,085 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40229 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:26,085 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/B is initiating minor compaction (all files) 2024-11-20T19:27:26,085 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/A is initiating minor compaction (all files) 2024-11-20T19:27:26,085 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/A in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:26,085 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/B in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
2024-11-20T19:27:26,085 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/eff5dc7d837b49d5aedca4d86eea3c89, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/920421403f8942758a390ca0311d9d23, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/38a2efd6c07a4efb9781c8c767123eca] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=36.9 K 2024-11-20T19:27:26,085 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/939ef10933514134b408bee8b512bea6, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/b767be6318754c309159aba0f1634fc4, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/b0bf8390f2d54c128a96bb68cd997bab] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=39.3 K 2024-11-20T19:27:26,086 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting eff5dc7d837b49d5aedca4d86eea3c89, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1732130841913 2024-11-20T19:27:26,086 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 939ef10933514134b408bee8b512bea6, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1732130841913 2024-11-20T19:27:26,086 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 920421403f8942758a390ca0311d9d23, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732130842544 2024-11-20T19:27:26,086 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting b767be6318754c309159aba0f1634fc4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732130842544 2024-11-20T19:27:26,086 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting b0bf8390f2d54c128a96bb68cd997bab, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1732130843717 2024-11-20T19:27:26,086 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 38a2efd6c07a4efb9781c8c767123eca, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1732130843717 2024-11-20T19:27:26,091 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#B#compaction#495 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:26,091 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#A#compaction#496 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:26,091 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/5e8a319a56a648518d217cf9dd112289 is 50, key is test_row_0/B:col10/1732130843717/Put/seqid=0 2024-11-20T19:27:26,091 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/79e973423c3a41b7aa631ebf656a8d1f is 50, key is test_row_0/A:col10/1732130843717/Put/seqid=0 2024-11-20T19:27:26,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742408_1584 (size=13289) 2024-11-20T19:27:26,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742409_1585 (size=13289) 2024-11-20T19:27:26,185 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:26,185 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T19:27:26,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
2024-11-20T19:27:26,186 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T19:27:26,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:26,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:26,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:26,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:26,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:26,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:26,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/70f2452b8f2046c28be0ac8822060975 is 50, key is test_row_0/A:col10/1732130844850/Put/seqid=0 2024-11-20T19:27:26,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742410_1586 (size=12301) 2024-11-20T19:27:26,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T19:27:26,497 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/5e8a319a56a648518d217cf9dd112289 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/5e8a319a56a648518d217cf9dd112289 2024-11-20T19:27:26,497 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/79e973423c3a41b7aa631ebf656a8d1f as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/79e973423c3a41b7aa631ebf656a8d1f 2024-11-20T19:27:26,500 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/A of ddf3bf4b0d5353d829b30f0de5c7c11a into 
79e973423c3a41b7aa631ebf656a8d1f(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:26,500 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/B of ddf3bf4b0d5353d829b30f0de5c7c11a into 5e8a319a56a648518d217cf9dd112289(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:26,500 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:26,500 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:26,500 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/A, priority=13, startTime=1732130846084; duration=0sec 2024-11-20T19:27:26,500 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/B, priority=13, startTime=1732130846085; duration=0sec 2024-11-20T19:27:26,500 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:26,500 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:26,500 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:B 2024-11-20T19:27:26,500 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:A 2024-11-20T19:27:26,501 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:26,501 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:26,501 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/C is initiating minor compaction (all files) 2024-11-20T19:27:26,501 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/C in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
2024-11-20T19:27:26,501 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/22308a37774f418d8a80b72ad0e5a617, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/7abf532f9b4d47109d5fa82b0ff06db1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/873ffeff8ff944c39c94e0822d6edbf5] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=36.9 K 2024-11-20T19:27:26,502 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 22308a37774f418d8a80b72ad0e5a617, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1732130841913 2024-11-20T19:27:26,502 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 7abf532f9b4d47109d5fa82b0ff06db1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732130842544 2024-11-20T19:27:26,502 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 873ffeff8ff944c39c94e0822d6edbf5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1732130843717 2024-11-20T19:27:26,506 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#C#compaction#498 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:26,506 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/899b8b226d2e49f6ab35b69ce9160b9c is 50, key is test_row_0/C:col10/1732130843717/Put/seqid=0 2024-11-20T19:27:26,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742411_1587 (size=13289) 2024-11-20T19:27:26,592 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/70f2452b8f2046c28be0ac8822060975 2024-11-20T19:27:26,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/019665f97f114cdca6e668fce5c582d7 is 50, key is test_row_0/B:col10/1732130844850/Put/seqid=0 2024-11-20T19:27:26,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742412_1588 (size=12301) 2024-11-20T19:27:26,912 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/899b8b226d2e49f6ab35b69ce9160b9c as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/899b8b226d2e49f6ab35b69ce9160b9c 2024-11-20T19:27:26,927 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/C of ddf3bf4b0d5353d829b30f0de5c7c11a into 899b8b226d2e49f6ab35b69ce9160b9c(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:26,927 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:26,927 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/C, priority=13, startTime=1732130846085; duration=0sec 2024-11-20T19:27:26,927 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:26,927 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:C 2024-11-20T19:27:26,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:26,989 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:27,001 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/019665f97f114cdca6e668fce5c582d7 2024-11-20T19:27:27,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/d4085c634398410685ad20e59150a397 is 50, key is test_row_0/C:col10/1732130844850/Put/seqid=0 2024-11-20T19:27:27,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742413_1589 (size=12301) 2024-11-20T19:27:27,038 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:27,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130907031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:27,039 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:27,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130907031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:27,040 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:27,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130907036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:27,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:27,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130907140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:27,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:27,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130907140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:27,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:27,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130907141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:27,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:27,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130907345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:27,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:27,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130907345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:27,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:27,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130907345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:27,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T19:27:27,409 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/d4085c634398410685ad20e59150a397 2024-11-20T19:27:27,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/70f2452b8f2046c28be0ac8822060975 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/70f2452b8f2046c28be0ac8822060975 2024-11-20T19:27:27,417 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/70f2452b8f2046c28be0ac8822060975, entries=150, sequenceid=416, filesize=12.0 K 2024-11-20T19:27:27,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/019665f97f114cdca6e668fce5c582d7 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/019665f97f114cdca6e668fce5c582d7 2024-11-20T19:27:27,422 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/019665f97f114cdca6e668fce5c582d7, entries=150, sequenceid=416, filesize=12.0 K 2024-11-20T19:27:27,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/d4085c634398410685ad20e59150a397 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/d4085c634398410685ad20e59150a397 2024-11-20T19:27:27,426 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/d4085c634398410685ad20e59150a397, entries=150, sequenceid=416, filesize=12.0 K 2024-11-20T19:27:27,426 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for ddf3bf4b0d5353d829b30f0de5c7c11a in 1241ms, sequenceid=416, compaction requested=false 2024-11-20T19:27:27,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:27,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:27,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-20T19:27:27,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-11-20T19:27:27,428 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-11-20T19:27:27,428 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1540 sec 2024-11-20T19:27:27,429 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 2.1560 sec 2024-11-20T19:27:27,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:27,651 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-20T19:27:27,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:27,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:27,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:27,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:27,652 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:27,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:27,655 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/768e2abefe994321bacb0137d2a7ff67 is 50, key is test_row_0/A:col10/1732130847030/Put/seqid=0 2024-11-20T19:27:27,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742414_1590 (size=14741) 2024-11-20T19:27:27,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:27,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130907654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:27,658 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=448 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/768e2abefe994321bacb0137d2a7ff67 2024-11-20T19:27:27,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:27,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130907655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:27,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:27,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130907658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:27,672 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/a5dd8e398a9e44c98860321bb21a22ef is 50, key is test_row_0/B:col10/1732130847030/Put/seqid=0 2024-11-20T19:27:27,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742415_1591 (size=12301) 2024-11-20T19:27:27,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:27,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130907758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:27,764 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:27,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130907762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:27,767 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:27,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130907765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:27,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:27,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59004 deadline: 1732130907872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:27,874 DEBUG [Thread-2195 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4149 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., hostname=db9c3a6c6492,41229,1732130701496, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:27:27,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:27,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130907963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:27,969 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:27,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130907966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:27,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:27,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130907969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:28,078 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=448 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/a5dd8e398a9e44c98860321bb21a22ef 2024-11-20T19:27:28,083 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/150cf25c8119448da06a80b44dc2dfb7 is 50, key is test_row_0/C:col10/1732130847030/Put/seqid=0 2024-11-20T19:27:28,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742416_1592 (size=12301) 2024-11-20T19:27:28,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:28,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130908269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:28,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:28,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130908271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:28,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:28,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130908272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:28,486 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=448 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/150cf25c8119448da06a80b44dc2dfb7 2024-11-20T19:27:28,489 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/768e2abefe994321bacb0137d2a7ff67 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/768e2abefe994321bacb0137d2a7ff67 2024-11-20T19:27:28,491 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/768e2abefe994321bacb0137d2a7ff67, entries=200, sequenceid=448, filesize=14.4 K 2024-11-20T19:27:28,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/a5dd8e398a9e44c98860321bb21a22ef as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/a5dd8e398a9e44c98860321bb21a22ef 2024-11-20T19:27:28,494 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/a5dd8e398a9e44c98860321bb21a22ef, entries=150, sequenceid=448, filesize=12.0 K 2024-11-20T19:27:28,495 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/150cf25c8119448da06a80b44dc2dfb7 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/150cf25c8119448da06a80b44dc2dfb7 2024-11-20T19:27:28,497 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/150cf25c8119448da06a80b44dc2dfb7, entries=150, sequenceid=448, filesize=12.0 K 2024-11-20T19:27:28,498 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for ddf3bf4b0d5353d829b30f0de5c7c11a in 847ms, sequenceid=448, compaction requested=true 2024-11-20T19:27:28,498 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:28,498 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:28,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:28,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:28,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:28,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:28,498 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:28,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:28,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:28,499 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40331 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:28,499 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:28,499 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/A is initiating minor compaction (all files) 2024-11-20T19:27:28,499 DEBUG 
[RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/B is initiating minor compaction (all files) 2024-11-20T19:27:28,499 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/B in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:28,499 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/A in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:28,499 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/79e973423c3a41b7aa631ebf656a8d1f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/70f2452b8f2046c28be0ac8822060975, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/768e2abefe994321bacb0137d2a7ff67] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=39.4 K 2024-11-20T19:27:28,499 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/5e8a319a56a648518d217cf9dd112289, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/019665f97f114cdca6e668fce5c582d7, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/a5dd8e398a9e44c98860321bb21a22ef] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=37.0 K 2024-11-20T19:27:28,499 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79e973423c3a41b7aa631ebf656a8d1f, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1732130843717 2024-11-20T19:27:28,499 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e8a319a56a648518d217cf9dd112289, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1732130843717 2024-11-20T19:27:28,499 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 70f2452b8f2046c28be0ac8822060975, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732130844847 2024-11-20T19:27:28,499 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 019665f97f114cdca6e668fce5c582d7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732130844847 2024-11-20T19:27:28,499 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
768e2abefe994321bacb0137d2a7ff67, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=448, earliestPutTs=1732130847028 2024-11-20T19:27:28,499 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting a5dd8e398a9e44c98860321bb21a22ef, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=448, earliestPutTs=1732130847028 2024-11-20T19:27:28,504 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#A#compaction#504 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:28,504 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/26eeb5e1175f4143b6b830a97e8630f0 is 50, key is test_row_0/A:col10/1732130847030/Put/seqid=0 2024-11-20T19:27:28,506 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#B#compaction#505 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:28,507 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/c4368a96c8354f8cac49cf059d59c025 is 50, key is test_row_0/B:col10/1732130847030/Put/seqid=0 2024-11-20T19:27:28,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742417_1593 (size=13391) 2024-11-20T19:27:28,516 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/26eeb5e1175f4143b6b830a97e8630f0 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/26eeb5e1175f4143b6b830a97e8630f0 2024-11-20T19:27:28,520 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/A of ddf3bf4b0d5353d829b30f0de5c7c11a into 26eeb5e1175f4143b6b830a97e8630f0(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
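Note on the repeated RegionTooBusyException entries above: HRegion.checkResources() rejects mutations while the region's memstore is over its blocking limit (logged as "Over memstore limit=512.0 K"), and the client's RpcRetryingCallerImpl keeps retrying with backoff (the "tries=6, retries=16" entry) until the flushes shown above free up space. The sketch below is illustrative only and is not part of this test: it shows where that blocking limit and the client retry budget come from in configuration. The fallback values are the usual HBase 2.x defaults, not whatever this test run actually set; a 512 K limit implies the test lowered the flush size far below the stock 128 MB.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

/**
 * Illustrative sketch (not the test's code): how the memstore blocking threshold
 * behind "RegionTooBusyException: Over memstore limit=..." is derived from
 * configuration, and the client-side retry knobs behind RpcRetryingCallerImpl.
 */
public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Per-region flush trigger (default 128 MB) and blocking multiplier (default 4).
    long flushSize  = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);

    // HRegion.checkResources() throws RegionTooBusyException once the region's
    // memstore exceeds roughly flushSize * multiplier (512.0 K in this log excerpt).
    long blockingLimit = flushSize * multiplier;
    System.out.println("blocking memstore limit = " + blockingLimit + " bytes");

    // Client-side retry budget seen as "tries=6, retries=16" in the log.
    int retries  = conf.getInt("hbase.client.retries.number", 15);
    long pauseMs = conf.getLong("hbase.client.pause", 100L);
    System.out.println("client retries=" + retries + ", base pause=" + pauseMs + " ms");
  }
}
```

In other words, the rejected writes are not lost: the client retries them until the MemStoreFlusher entries above bring the memstore back under the limit.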
2024-11-20T19:27:28,520 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:28,520 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/A, priority=13, startTime=1732130848498; duration=0sec 2024-11-20T19:27:28,520 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:28,520 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:A 2024-11-20T19:27:28,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742418_1594 (size=13391) 2024-11-20T19:27:28,520 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:28,521 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:28,521 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/C is initiating minor compaction (all files) 2024-11-20T19:27:28,521 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/C in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
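For reference on the ExploringCompactionPolicy lines above ("selected 3 files of size 40331/37891 ... with 1 in ratio"): a candidate set qualifies when every file is "in ratio", meaning no single file is larger than the configured ratio (hbase.hstore.compaction.ratio, default 1.2) times the combined size of the other files in the set. The sketch below illustrates that rule with the approximate store-file sizes from this excerpt; it is not the actual policy implementation.

```java
import java.util.List;

/**
 * Illustrative sketch (not HBase source) of the "files in ratio" check used by the
 * exploring compaction selection logged above. Sizes are approximate KB from this excerpt.
 */
public class FilesInRatioSketch {

  // A candidate set is acceptable when no single file is larger than
  // ratio * (combined size of the other files in the set).
  static boolean filesInRatio(List<Double> sizesKb, double ratio) {
    double total = sizesKb.stream().mapToDouble(Double::doubleValue).sum();
    for (double size : sizesKb) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    double ratio = 1.2; // hbase.hstore.compaction.ratio default
    // Store A: 13.0 K + 12.0 K + 14.4 K (~39.4 K total, "size 40331" in the log)
    System.out.println(filesInRatio(List.of(13.0, 12.0, 14.4), ratio)); // true -> all 3 selected
    // Stores B and C: 13.0 K + 12.0 K + 12.0 K (~37.0 K total, "size 37891" in the log)
    System.out.println(filesInRatio(List.of(13.0, 12.0, 12.0), ratio)); // true -> all 3 selected
  }
}
```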
2024-11-20T19:27:28,521 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/899b8b226d2e49f6ab35b69ce9160b9c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/d4085c634398410685ad20e59150a397, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/150cf25c8119448da06a80b44dc2dfb7] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=37.0 K 2024-11-20T19:27:28,521 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 899b8b226d2e49f6ab35b69ce9160b9c, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1732130843717 2024-11-20T19:27:28,521 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting d4085c634398410685ad20e59150a397, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732130844847 2024-11-20T19:27:28,522 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 150cf25c8119448da06a80b44dc2dfb7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=448, earliestPutTs=1732130847028 2024-11-20T19:27:28,527 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#C#compaction#506 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:28,527 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/70b2dd43dd8541b1a8bd914f24127743 is 50, key is test_row_0/C:col10/1732130847030/Put/seqid=0 2024-11-20T19:27:28,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742419_1595 (size=13391) 2024-11-20T19:27:28,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:28,780 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:27:28,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:28,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:28,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:28,781 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:28,781 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:28,781 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:28,784 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/95397acf3fc4416583eda6eaf43965b5 is 50, key is test_row_0/A:col10/1732130848779/Put/seqid=0 2024-11-20T19:27:28,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742420_1596 (size=14741) 2024-11-20T19:27:28,832 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:28,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130908828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:28,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:28,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130908830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:28,836 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:28,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130908830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:28,923 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/c4368a96c8354f8cac49cf059d59c025 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/c4368a96c8354f8cac49cf059d59c025 2024-11-20T19:27:28,927 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/B of ddf3bf4b0d5353d829b30f0de5c7c11a into c4368a96c8354f8cac49cf059d59c025(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:28,927 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:28,927 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/B, priority=13, startTime=1732130848498; duration=0sec 2024-11-20T19:27:28,927 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:28,927 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:B 2024-11-20T19:27:28,936 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:28,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130908933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:28,938 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/70b2dd43dd8541b1a8bd914f24127743 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/70b2dd43dd8541b1a8bd914f24127743 2024-11-20T19:27:28,941 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/C of ddf3bf4b0d5353d829b30f0de5c7c11a into 70b2dd43dd8541b1a8bd914f24127743(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:28,941 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:28,941 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/C, priority=13, startTime=1732130848498; duration=0sec 2024-11-20T19:27:28,941 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:28,941 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:C 2024-11-20T19:27:28,941 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:28,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130908935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:28,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:28,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130908937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:29,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130909138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:29,143 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130909142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:29,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130909145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:29,196 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=461 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/95397acf3fc4416583eda6eaf43965b5 2024-11-20T19:27:29,203 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/5da069065b3149b697dcfce17309e89b is 50, key is test_row_0/B:col10/1732130848779/Put/seqid=0 2024-11-20T19:27:29,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742421_1597 (size=12301) 2024-11-20T19:27:29,219 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=461 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/5da069065b3149b697dcfce17309e89b 2024-11-20T19:27:29,231 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/779da2e28af94d19bac89cf267429845 is 50, key is test_row_0/C:col10/1732130848779/Put/seqid=0 2024-11-20T19:27:29,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742422_1598 (size=12301) 2024-11-20T19:27:29,250 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=461 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/779da2e28af94d19bac89cf267429845 2024-11-20T19:27:29,256 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/95397acf3fc4416583eda6eaf43965b5 as 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/95397acf3fc4416583eda6eaf43965b5 2024-11-20T19:27:29,258 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/95397acf3fc4416583eda6eaf43965b5, entries=200, sequenceid=461, filesize=14.4 K 2024-11-20T19:27:29,259 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/5da069065b3149b697dcfce17309e89b as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/5da069065b3149b697dcfce17309e89b 2024-11-20T19:27:29,264 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/5da069065b3149b697dcfce17309e89b, entries=150, sequenceid=461, filesize=12.0 K 2024-11-20T19:27:29,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/779da2e28af94d19bac89cf267429845 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/779da2e28af94d19bac89cf267429845 2024-11-20T19:27:29,270 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/779da2e28af94d19bac89cf267429845, entries=150, sequenceid=461, filesize=12.0 K 2024-11-20T19:27:29,271 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for ddf3bf4b0d5353d829b30f0de5c7c11a in 491ms, sequenceid=461, compaction requested=false 2024-11-20T19:27:29,271 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:29,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T19:27:29,377 INFO [Thread-2203 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-11-20T19:27:29,378 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:29,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-11-20T19:27:29,380 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute 
state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:29,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T19:27:29,381 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:29,381 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:29,448 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:27:29,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:29,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:29,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:29,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:29,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:29,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:29,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:29,452 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/4ca54b3874204ee291338ff17e165366 is 50, key is test_row_0/A:col10/1732130848829/Put/seqid=0 2024-11-20T19:27:29,476 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130909465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:29,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T19:27:29,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130909476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:29,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130909476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:29,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742423_1599 (size=14741) 2024-11-20T19:27:29,534 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:29,535 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T19:27:29,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:29,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:29,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:29,535 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:29,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:29,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:29,583 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130909577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:29,593 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130909587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:29,593 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130909587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:29,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T19:27:29,687 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:29,687 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T19:27:29,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:29,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:29,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:29,688 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:29,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:29,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:29,774 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T19:27:29,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130909784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:29,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130909794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:29,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130909795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:29,840 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:29,842 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T19:27:29,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
2024-11-20T19:27:29,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:29,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:29,843 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:29,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
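Note on the repeated pid=141 failures ("Unable to complete flush ... as already flushing"): a master-dispatched FlushRegionCallable keeps arriving while the region is still flushing, the region server reports the failure, and the master re-dispatches the same procedure, which is why pid=141 recurs throughout this log. For reference, a table flush of this kind can be requested from client code through the Admin API; the sketch below is illustrative, assumes a reachable cluster, and is not taken from the test itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestTableFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Requests a flush of every region of the table. In the log above the
      // equivalent remote procedure (pid=141) is rejected while the region is
      // already flushing and the master dispatches it again.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}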
2024-11-20T19:27:29,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:29,905 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=488 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/4ca54b3874204ee291338ff17e165366 2024-11-20T19:27:29,920 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/10e890a01f8343dea7c06b7e024f682c is 50, key is test_row_0/B:col10/1732130848829/Put/seqid=0 2024-11-20T19:27:29,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59050 deadline: 1732130909937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:29,946 DEBUG [Thread-2199 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18278 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., hostname=db9c3a6c6492,41229,1732130701496, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:27:29,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742424_1600 (size=12301) 2024-11-20T19:27:29,957 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=488 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/10e890a01f8343dea7c06b7e024f682c 2024-11-20T19:27:29,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T19:27:29,985 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/25faf9fea3c64da8a2116d965d598e6a is 50, key is test_row_0/C:col10/1732130848829/Put/seqid=0 2024-11-20T19:27:29,994 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:29,995 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T19:27:29,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:29,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:29,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:29,995 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
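Note on the client side of the same back-pressure, visible in the RpcRetryingCallerImpl entry above (tries=8, retries=16): the put issued by AcidGuaranteesTestTool$AtomicityWriter is retried while the region keeps answering with RegionTooBusyException. A minimal writer sketch follows, using the table, row, family and qualifier names that appear in this log; the retry settings are illustrative assumptions rather than the test's actual configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The retrying caller above reports tries=8 of retries=16; these client
    // settings govern that loop (the values here are illustrative assumptions).
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100L);
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      // put() blocks through the retry loop; a RegionTooBusyException is
      // retried until the region flushes or the retry budget is exhausted.
      table.put(put);
    }
  }
}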
2024-11-20T19:27:29,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:29,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:30,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742425_1601 (size=12301) 2024-11-20T19:27:30,012 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=488 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/25faf9fea3c64da8a2116d965d598e6a 2024-11-20T19:27:30,016 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/4ca54b3874204ee291338ff17e165366 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/4ca54b3874204ee291338ff17e165366 2024-11-20T19:27:30,022 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/4ca54b3874204ee291338ff17e165366, entries=200, sequenceid=488, filesize=14.4 K 2024-11-20T19:27:30,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/10e890a01f8343dea7c06b7e024f682c as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/10e890a01f8343dea7c06b7e024f682c 2024-11-20T19:27:30,027 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/10e890a01f8343dea7c06b7e024f682c, entries=150, sequenceid=488, filesize=12.0 K 2024-11-20T19:27:30,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/25faf9fea3c64da8a2116d965d598e6a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/25faf9fea3c64da8a2116d965d598e6a 2024-11-20T19:27:30,036 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/25faf9fea3c64da8a2116d965d598e6a, entries=150, sequenceid=488, filesize=12.0 K 2024-11-20T19:27:30,037 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for ddf3bf4b0d5353d829b30f0de5c7c11a in 590ms, sequenceid=488, compaction requested=true 2024-11-20T19:27:30,037 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:30,037 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:30,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:30,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:30,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:30,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:30,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:30,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:30,038 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:30,039 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42873 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:30,039 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/A is initiating minor compaction (all files) 2024-11-20T19:27:30,039 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/A in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
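Note on the compaction selections above ("Exploring compaction algorithm has selected 3 files ... with 1 in ratio"): a candidate set qualifies only if no file is larger than the compaction ratio times the combined size of the other files in the set. The sketch below is a simplified illustration of that check, not the HBase implementation; the file sizes and the 1.2 ratio are assumptions for the example.

public class CompactionRatioCheck {
  // Simplified stand-in for the "in ratio" test: every file in the candidate
  // set must be no larger than ratio * (sum of the other files in the set).
  static boolean filesInRatio(long[] fileSizes, double ratio) {
    long total = 0L;
    for (long size : fileSizes) {
      total += size;
    }
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Roughly the three A-store files selected above (13.1 K + 14.4 K + 14.4 K).
    long[] sizes = {13400L, 14700L, 14700L};
    System.out.println(filesInRatio(sizes, 1.2)); // prints true
  }
}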
2024-11-20T19:27:30,039 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/26eeb5e1175f4143b6b830a97e8630f0, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/95397acf3fc4416583eda6eaf43965b5, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/4ca54b3874204ee291338ff17e165366] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=41.9 K 2024-11-20T19:27:30,040 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:30,040 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/B is initiating minor compaction (all files) 2024-11-20T19:27:30,040 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/B in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:30,040 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/c4368a96c8354f8cac49cf059d59c025, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/5da069065b3149b697dcfce17309e89b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/10e890a01f8343dea7c06b7e024f682c] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=37.1 K 2024-11-20T19:27:30,040 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 26eeb5e1175f4143b6b830a97e8630f0, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=448, earliestPutTs=1732130847028 2024-11-20T19:27:30,040 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting c4368a96c8354f8cac49cf059d59c025, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=448, earliestPutTs=1732130847028 2024-11-20T19:27:30,040 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95397acf3fc4416583eda6eaf43965b5, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=461, earliestPutTs=1732130848775 2024-11-20T19:27:30,040 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 5da069065b3149b697dcfce17309e89b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=461, earliestPutTs=1732130848775 2024-11-20T19:27:30,040 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 10e890a01f8343dea7c06b7e024f682c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=488, earliestPutTs=1732130848827 2024-11-20T19:27:30,041 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ca54b3874204ee291338ff17e165366, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=488, earliestPutTs=1732130848814 2024-11-20T19:27:30,053 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#B#compaction#513 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:30,053 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/3c925664b20b471b80145fe5ceeccec4 is 50, key is test_row_0/B:col10/1732130848829/Put/seqid=0 2024-11-20T19:27:30,064 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#A#compaction#514 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:30,065 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/8dc45561be5d474fb22f74aa85b91ff1 is 50, key is test_row_0/A:col10/1732130848829/Put/seqid=0 2024-11-20T19:27:30,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742426_1602 (size=13493) 2024-11-20T19:27:30,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:30,097 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T19:27:30,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:30,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:30,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:30,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:30,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:30,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:30,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742427_1603 (size=13493) 2024-11-20T19:27:30,127 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/b228bbbbf56e4d6fa3ba4958c0f0b703 is 50, key is test_row_0/A:col10/1732130850095/Put/seqid=0 2024-11-20T19:27:30,132 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/8dc45561be5d474fb22f74aa85b91ff1 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/8dc45561be5d474fb22f74aa85b91ff1 2024-11-20T19:27:30,147 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:30,147 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/A of ddf3bf4b0d5353d829b30f0de5c7c11a into 8dc45561be5d474fb22f74aa85b91ff1(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:30,147 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:30,147 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T19:27:30,147 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/A, priority=13, startTime=1732130850037; duration=0sec 2024-11-20T19:27:30,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:30,148 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:30,148 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:A 2024-11-20T19:27:30,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:30,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
2024-11-20T19:27:30,148 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:30,148 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:30,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:30,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:30,155 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:30,155 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/C is initiating minor compaction (all files) 2024-11-20T19:27:30,155 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/C in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
2024-11-20T19:27:30,155 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/70b2dd43dd8541b1a8bd914f24127743, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/779da2e28af94d19bac89cf267429845, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/25faf9fea3c64da8a2116d965d598e6a] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=37.1 K 2024-11-20T19:27:30,157 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 70b2dd43dd8541b1a8bd914f24127743, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=448, earliestPutTs=1732130847028 2024-11-20T19:27:30,158 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 779da2e28af94d19bac89cf267429845, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=461, earliestPutTs=1732130848775 2024-11-20T19:27:30,159 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 25faf9fea3c64da8a2116d965d598e6a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=488, earliestPutTs=1732130848827 2024-11-20T19:27:30,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130910171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:30,184 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130910174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:30,185 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130910174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:30,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742428_1604 (size=14741) 2024-11-20T19:27:30,202 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#C#compaction#516 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:30,202 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/8196c8b5698a44b180c21c4409fe3fc5 is 50, key is test_row_0/C:col10/1732130848829/Put/seqid=0 2024-11-20T19:27:30,203 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=500 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/b228bbbbf56e4d6fa3ba4958c0f0b703 2024-11-20T19:27:30,220 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/834617e1df9949e9a524d47b8b5cac51 is 50, key is test_row_0/B:col10/1732130850095/Put/seqid=0 2024-11-20T19:27:30,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742429_1605 (size=13493) 2024-11-20T19:27:30,257 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/8196c8b5698a44b180c21c4409fe3fc5 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/8196c8b5698a44b180c21c4409fe3fc5 2024-11-20T19:27:30,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742430_1606 (size=12301) 2024-11-20T19:27:30,264 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/C of ddf3bf4b0d5353d829b30f0de5c7c11a into 8196c8b5698a44b180c21c4409fe3fc5(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:30,264 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:30,265 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/C, priority=13, startTime=1732130850038; duration=0sec 2024-11-20T19:27:30,265 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:30,265 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:C 2024-11-20T19:27:30,267 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=500 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/834617e1df9949e9a524d47b8b5cac51 2024-11-20T19:27:30,275 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/12a8a4608fc84eb09b7235eea5c54ff1 is 50, key is test_row_0/C:col10/1732130850095/Put/seqid=0 2024-11-20T19:27:30,293 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130910287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:30,293 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130910287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:30,293 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130910287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:30,301 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:30,302 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T19:27:30,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:30,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:30,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:30,302 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:30,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:30,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:30,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742431_1607 (size=12301) 2024-11-20T19:27:30,455 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:30,455 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T19:27:30,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:30,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:30,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:30,456 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:30,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:30,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:30,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T19:27:30,499 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/3c925664b20b471b80145fe5ceeccec4 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/3c925664b20b471b80145fe5ceeccec4 2024-11-20T19:27:30,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130910496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:30,506 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130910496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:30,506 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/B of ddf3bf4b0d5353d829b30f0de5c7c11a into 3c925664b20b471b80145fe5ceeccec4(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:30,506 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:30,506 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/B, priority=13, startTime=1732130850038; duration=0sec 2024-11-20T19:27:30,506 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,506 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:30,506 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:B 2024-11-20T19:27:30,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130910496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:30,608 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:30,608 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T19:27:30,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:30,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. as already flushing 2024-11-20T19:27:30,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:30,608 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:30,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:30,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:30,708 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=500 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/12a8a4608fc84eb09b7235eea5c54ff1 2024-11-20T19:27:30,715 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/b228bbbbf56e4d6fa3ba4958c0f0b703 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/b228bbbbf56e4d6fa3ba4958c0f0b703 2024-11-20T19:27:30,719 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/b228bbbbf56e4d6fa3ba4958c0f0b703, entries=200, sequenceid=500, filesize=14.4 K 2024-11-20T19:27:30,721 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/834617e1df9949e9a524d47b8b5cac51 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/834617e1df9949e9a524d47b8b5cac51 2024-11-20T19:27:30,727 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/834617e1df9949e9a524d47b8b5cac51, entries=150, sequenceid=500, filesize=12.0 K 2024-11-20T19:27:30,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/12a8a4608fc84eb09b7235eea5c54ff1 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/12a8a4608fc84eb09b7235eea5c54ff1 2024-11-20T19:27:30,738 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/12a8a4608fc84eb09b7235eea5c54ff1, entries=150, sequenceid=500, filesize=12.0 K 2024-11-20T19:27:30,739 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for ddf3bf4b0d5353d829b30f0de5c7c11a in 642ms, sequenceid=500, compaction requested=false 2024-11-20T19:27:30,739 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:30,760 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:30,761 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T19:27:30,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:30,761 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T19:27:30,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:30,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:30,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:30,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:30,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:30,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:30,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/019e1507c2e74285a7ce4e4c4944b02a is 50, key is test_row_0/A:col10/1732130850173/Put/seqid=0 2024-11-20T19:27:30,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742432_1608 (size=12301) 2024-11-20T19:27:30,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:30,814 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
as already flushing 2024-11-20T19:27:30,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130910833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:30,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130910835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:30,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130910835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:30,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130910939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:30,945 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130910939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:30,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130910939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:31,151 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:31,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130911146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:31,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:31,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130911146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:31,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:31,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130911147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:31,193 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=527 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/019e1507c2e74285a7ce4e4c4944b02a 2024-11-20T19:27:31,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/e0199919377443908f93d0a497b76873 is 50, key is test_row_0/B:col10/1732130850173/Put/seqid=0 2024-11-20T19:27:31,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742433_1609 (size=12301) 2024-11-20T19:27:31,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:31,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59002 deadline: 1732130911453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:31,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:31,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58968 deadline: 1732130911453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:31,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:31,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59036 deadline: 1732130911455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:31,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T19:27:31,631 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=527 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/e0199919377443908f93d0a497b76873 2024-11-20T19:27:31,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/feb945d8a33f4b0f94e2b21b34fdd160 is 50, key is test_row_0/C:col10/1732130850173/Put/seqid=0 2024-11-20T19:27:31,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742434_1610 (size=12301) 2024-11-20T19:27:31,674 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=527 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/feb945d8a33f4b0f94e2b21b34fdd160 2024-11-20T19:27:31,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/019e1507c2e74285a7ce4e4c4944b02a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/019e1507c2e74285a7ce4e4c4944b02a 2024-11-20T19:27:31,688 INFO 
[RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/019e1507c2e74285a7ce4e4c4944b02a, entries=150, sequenceid=527, filesize=12.0 K 2024-11-20T19:27:31,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/e0199919377443908f93d0a497b76873 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/e0199919377443908f93d0a497b76873 2024-11-20T19:27:31,697 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/e0199919377443908f93d0a497b76873, entries=150, sequenceid=527, filesize=12.0 K 2024-11-20T19:27:31,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/feb945d8a33f4b0f94e2b21b34fdd160 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/feb945d8a33f4b0f94e2b21b34fdd160 2024-11-20T19:27:31,703 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/feb945d8a33f4b0f94e2b21b34fdd160, entries=150, sequenceid=527, filesize=12.0 K 2024-11-20T19:27:31,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-20T19:27:31,711 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for ddf3bf4b0d5353d829b30f0de5c7c11a in 950ms, sequenceid=527, compaction requested=true 2024-11-20T19:27:31,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:31,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
2024-11-20T19:27:31,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-20T19:27:31,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-11-20T19:27:31,714 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-11-20T19:27:31,714 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3310 sec 2024-11-20T19:27:31,715 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 2.3360 sec 2024-11-20T19:27:31,724 DEBUG [Thread-2206 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b7f20c4 to 127.0.0.1:49985 2024-11-20T19:27:31,724 DEBUG [Thread-2206 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:31,727 DEBUG [Thread-2208 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5f7c40ba to 127.0.0.1:49985 2024-11-20T19:27:31,727 DEBUG [Thread-2210 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x41b0e7b6 to 127.0.0.1:49985 2024-11-20T19:27:31,727 DEBUG [Thread-2210 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:31,727 DEBUG [Thread-2208 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:31,729 DEBUG [Thread-2204 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7daa5922 to 127.0.0.1:49985 2024-11-20T19:27:31,730 DEBUG [Thread-2204 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:31,734 DEBUG [Thread-2212 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0f2423f3 to 127.0.0.1:49985 2024-11-20T19:27:31,734 DEBUG [Thread-2212 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:31,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:31,909 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T19:27:31,909 DEBUG [Thread-2195 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x695c2253 to 127.0.0.1:49985 2024-11-20T19:27:31,909 DEBUG [Thread-2195 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:31,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:31,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:31,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:31,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:31,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:31,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-20T19:27:31,913 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/a7e14400d0ae495e965dd715bea18263 is 50, key is test_row_0/A:col10/1732130851908/Put/seqid=0 2024-11-20T19:27:31,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742435_1611 (size=12301) 2024-11-20T19:27:31,918 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=540 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/a7e14400d0ae495e965dd715bea18263 2024-11-20T19:27:31,932 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/90d271ded8ae46a6a38b7ae1c9a867fa is 50, key is test_row_0/B:col10/1732130851908/Put/seqid=0 2024-11-20T19:27:31,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742436_1612 (size=12301) 2024-11-20T19:27:31,953 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=540 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/90d271ded8ae46a6a38b7ae1c9a867fa 2024-11-20T19:27:31,962 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/db757947ec8747ebb05e575b1bef3f9e is 50, key is test_row_0/C:col10/1732130851908/Put/seqid=0 2024-11-20T19:27:31,963 DEBUG [Thread-2193 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3512017b to 127.0.0.1:49985 2024-11-20T19:27:31,963 DEBUG [Thread-2193 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:31,964 DEBUG [Thread-2197 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7177efc9 to 127.0.0.1:49985 2024-11-20T19:27:31,964 DEBUG [Thread-2197 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:31,966 DEBUG [Thread-2201 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7043f683 to 127.0.0.1:49985 2024-11-20T19:27:31,966 DEBUG [Thread-2201 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:31,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742437_1613 (size=12301) 2024-11-20T19:27:32,379 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=540 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/db757947ec8747ebb05e575b1bef3f9e 2024-11-20T19:27:32,383 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/a7e14400d0ae495e965dd715bea18263 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/a7e14400d0ae495e965dd715bea18263 2024-11-20T19:27:32,385 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/a7e14400d0ae495e965dd715bea18263, entries=150, sequenceid=540, filesize=12.0 K 2024-11-20T19:27:32,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/90d271ded8ae46a6a38b7ae1c9a867fa as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/90d271ded8ae46a6a38b7ae1c9a867fa 2024-11-20T19:27:32,388 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/90d271ded8ae46a6a38b7ae1c9a867fa, entries=150, sequenceid=540, filesize=12.0 K 2024-11-20T19:27:32,389 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/db757947ec8747ebb05e575b1bef3f9e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/db757947ec8747ebb05e575b1bef3f9e 2024-11-20T19:27:32,392 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/db757947ec8747ebb05e575b1bef3f9e, entries=150, sequenceid=540, filesize=12.0 K 2024-11-20T19:27:32,393 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=20.13 KB/20610 for ddf3bf4b0d5353d829b30f0de5c7c11a in 484ms, sequenceid=540, compaction requested=true 2024-11-20T19:27:32,393 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:32,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:32,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:32,393 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:27:32,393 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 
2024-11-20T19:27:32,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:32,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:32,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddf3bf4b0d5353d829b30f0de5c7c11a:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:32,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:32,394 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52836 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:27:32,394 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50396 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:27:32,394 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/A is initiating minor compaction (all files) 2024-11-20T19:27:32,394 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/B is initiating minor compaction (all files) 2024-11-20T19:27:32,394 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/A in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:32,394 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/B in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
2024-11-20T19:27:32,394 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/8dc45561be5d474fb22f74aa85b91ff1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/b228bbbbf56e4d6fa3ba4958c0f0b703, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/019e1507c2e74285a7ce4e4c4944b02a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/a7e14400d0ae495e965dd715bea18263] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=51.6 K 2024-11-20T19:27:32,394 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/3c925664b20b471b80145fe5ceeccec4, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/834617e1df9949e9a524d47b8b5cac51, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/e0199919377443908f93d0a497b76873, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/90d271ded8ae46a6a38b7ae1c9a867fa] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=49.2 K 2024-11-20T19:27:32,394 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8dc45561be5d474fb22f74aa85b91ff1, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=488, earliestPutTs=1732130848827 2024-11-20T19:27:32,395 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c925664b20b471b80145fe5ceeccec4, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=488, earliestPutTs=1732130848827 2024-11-20T19:27:32,395 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting b228bbbbf56e4d6fa3ba4958c0f0b703, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=500, earliestPutTs=1732130849463 2024-11-20T19:27:32,395 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 834617e1df9949e9a524d47b8b5cac51, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=500, earliestPutTs=1732130849474 2024-11-20T19:27:32,395 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 019e1507c2e74285a7ce4e4c4944b02a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=527, earliestPutTs=1732130850161 2024-11-20T19:27:32,395 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 
e0199919377443908f93d0a497b76873, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=527, earliestPutTs=1732130850161 2024-11-20T19:27:32,395 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting a7e14400d0ae495e965dd715bea18263, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=540, earliestPutTs=1732130850832 2024-11-20T19:27:32,395 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 90d271ded8ae46a6a38b7ae1c9a867fa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=540, earliestPutTs=1732130850832 2024-11-20T19:27:32,404 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#B#compaction#526 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:32,404 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#A#compaction#525 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:32,405 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/937e138c5868418592c0e8bd08f3cbe6 is 50, key is test_row_0/A:col10/1732130851908/Put/seqid=0 2024-11-20T19:27:32,405 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/5737f92a5e4f41a8b11225b69f305251 is 50, key is test_row_0/B:col10/1732130851908/Put/seqid=0 2024-11-20T19:27:32,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742439_1615 (size=13629) 2024-11-20T19:27:32,415 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/5737f92a5e4f41a8b11225b69f305251 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/5737f92a5e4f41a8b11225b69f305251 2024-11-20T19:27:32,418 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/B of ddf3bf4b0d5353d829b30f0de5c7c11a into 5737f92a5e4f41a8b11225b69f305251(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:32,418 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:32,418 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/B, priority=12, startTime=1732130852393; duration=0sec 2024-11-20T19:27:32,418 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:32,418 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:B 2024-11-20T19:27:32,418 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:27:32,419 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50396 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:27:32,419 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): ddf3bf4b0d5353d829b30f0de5c7c11a/C is initiating minor compaction (all files) 2024-11-20T19:27:32,419 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ddf3bf4b0d5353d829b30f0de5c7c11a/C in TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:32,419 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/8196c8b5698a44b180c21c4409fe3fc5, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/12a8a4608fc84eb09b7235eea5c54ff1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/feb945d8a33f4b0f94e2b21b34fdd160, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/db757947ec8747ebb05e575b1bef3f9e] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp, totalSize=49.2 K 2024-11-20T19:27:32,419 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 8196c8b5698a44b180c21c4409fe3fc5, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=488, earliestPutTs=1732130848827 2024-11-20T19:27:32,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742438_1614 (size=13629) 2024-11-20T19:27:32,420 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 12a8a4608fc84eb09b7235eea5c54ff1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=500, earliestPutTs=1732130849474 2024-11-20T19:27:32,420 DEBUG 
[RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting feb945d8a33f4b0f94e2b21b34fdd160, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=527, earliestPutTs=1732130850161 2024-11-20T19:27:32,421 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting db757947ec8747ebb05e575b1bef3f9e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=540, earliestPutTs=1732130850832 2024-11-20T19:27:32,424 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/937e138c5868418592c0e8bd08f3cbe6 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/937e138c5868418592c0e8bd08f3cbe6 2024-11-20T19:27:32,428 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/A of ddf3bf4b0d5353d829b30f0de5c7c11a into 937e138c5868418592c0e8bd08f3cbe6(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:32,428 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:32,428 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/A, priority=12, startTime=1732130852393; duration=0sec 2024-11-20T19:27:32,428 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:32,428 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:A 2024-11-20T19:27:32,431 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddf3bf4b0d5353d829b30f0de5c7c11a#C#compaction#527 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:32,431 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/8c1bfbb998704dd59a021b321c2b6eac is 50, key is test_row_0/C:col10/1732130851908/Put/seqid=0 2024-11-20T19:27:32,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742440_1616 (size=13629) 2024-11-20T19:27:32,838 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/8c1bfbb998704dd59a021b321c2b6eac as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/8c1bfbb998704dd59a021b321c2b6eac 2024-11-20T19:27:32,842 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ddf3bf4b0d5353d829b30f0de5c7c11a/C of ddf3bf4b0d5353d829b30f0de5c7c11a into 8c1bfbb998704dd59a021b321c2b6eac(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:32,842 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:32,842 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a., storeName=ddf3bf4b0d5353d829b30f0de5c7c11a/C, priority=12, startTime=1732130852393; duration=0sec 2024-11-20T19:27:32,842 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:32,842 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddf3bf4b0d5353d829b30f0de5c7c11a:C 2024-11-20T19:27:33,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T19:27:33,486 INFO [Thread-2203 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-11-20T19:27:40,026 DEBUG [Thread-2199 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x61d38088 to 127.0.0.1:49985 2024-11-20T19:27:40,027 DEBUG [Thread-2199 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:40,027 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-20T19:27:40,027 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 113 2024-11-20T19:27:40,027 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 82 2024-11-20T19:27:40,027 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 115 2024-11-20T19:27:40,027 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 1 2024-11-20T19:27:40,027 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 115 2024-11-20T19:27:40,027 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T19:27:40,027 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T19:27:40,027 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1900 2024-11-20T19:27:40,027 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5700 rows 2024-11-20T19:27:40,027 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1898 2024-11-20T19:27:40,027 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5694 rows 2024-11-20T19:27:40,027 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1895 2024-11-20T19:27:40,027 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5685 rows 2024-11-20T19:27:40,027 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1915 2024-11-20T19:27:40,027 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5745 rows 2024-11-20T19:27:40,027 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1889 2024-11-20T19:27:40,027 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5667 rows 2024-11-20T19:27:40,027 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T19:27:40,027 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3fa53591 to 127.0.0.1:49985 2024-11-20T19:27:40,027 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:40,029 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T19:27:40,029 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T19:27:40,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:40,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T19:27:40,035 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130860035"}]},"ts":"1732130860035"} 2024-11-20T19:27:40,036 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T19:27:40,099 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T19:27:40,099 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T19:27:40,100 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ddf3bf4b0d5353d829b30f0de5c7c11a, UNASSIGN}] 2024-11-20T19:27:40,101 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ddf3bf4b0d5353d829b30f0de5c7c11a, UNASSIGN 2024-11-20T19:27:40,101 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=144 updating hbase:meta row=ddf3bf4b0d5353d829b30f0de5c7c11a, regionState=CLOSING, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:40,102 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T19:27:40,102 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; CloseRegionProcedure ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496}] 2024-11-20T19:27:40,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T19:27:40,253 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:40,253 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] handler.UnassignRegionHandler(124): Close ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:40,253 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T19:27:40,253 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegion(1681): Closing ddf3bf4b0d5353d829b30f0de5c7c11a, disabling compactions & flushes 2024-11-20T19:27:40,254 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:40,254 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 2024-11-20T19:27:40,254 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. after waiting 0 ms 2024-11-20T19:27:40,254 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
2024-11-20T19:27:40,254 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegion(2837): Flushing ddf3bf4b0d5353d829b30f0de5c7c11a 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-20T19:27:40,254 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=A 2024-11-20T19:27:40,254 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:40,254 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=B 2024-11-20T19:27:40,254 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:40,254 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ddf3bf4b0d5353d829b30f0de5c7c11a, store=C 2024-11-20T19:27:40,254 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:40,257 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/abc1c54c8cad4f05ba174ebbe6a41a1d is 50, key is test_row_0/A:col10/1732130860026/Put/seqid=0 2024-11-20T19:27:40,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742441_1617 (size=9857) 2024-11-20T19:27:40,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T19:27:40,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T19:27:40,661 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=550 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/abc1c54c8cad4f05ba174ebbe6a41a1d 2024-11-20T19:27:40,666 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/bb6c86b0f61d489e91dfb66c3353e485 is 50, key is test_row_0/B:col10/1732130860026/Put/seqid=0 2024-11-20T19:27:40,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742442_1618 (size=9857) 2024-11-20T19:27:41,070 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 
{event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=550 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/bb6c86b0f61d489e91dfb66c3353e485 2024-11-20T19:27:41,075 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/29383bf9a2164250b063a10fd93152ea is 50, key is test_row_0/C:col10/1732130860026/Put/seqid=0 2024-11-20T19:27:41,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742443_1619 (size=9857) 2024-11-20T19:27:41,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T19:27:41,481 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=550 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/29383bf9a2164250b063a10fd93152ea 2024-11-20T19:27:41,484 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/A/abc1c54c8cad4f05ba174ebbe6a41a1d as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/abc1c54c8cad4f05ba174ebbe6a41a1d 2024-11-20T19:27:41,487 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/abc1c54c8cad4f05ba174ebbe6a41a1d, entries=100, sequenceid=550, filesize=9.6 K 2024-11-20T19:27:41,488 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/B/bb6c86b0f61d489e91dfb66c3353e485 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/bb6c86b0f61d489e91dfb66c3353e485 2024-11-20T19:27:41,490 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/bb6c86b0f61d489e91dfb66c3353e485, entries=100, sequenceid=550, filesize=9.6 K 2024-11-20T19:27:41,491 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/.tmp/C/29383bf9a2164250b063a10fd93152ea as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/29383bf9a2164250b063a10fd93152ea 2024-11-20T19:27:41,494 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/29383bf9a2164250b063a10fd93152ea, entries=100, sequenceid=550, filesize=9.6 K 2024-11-20T19:27:41,494 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for ddf3bf4b0d5353d829b30f0de5c7c11a in 1240ms, sequenceid=550, compaction requested=false 2024-11-20T19:27:41,495 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/d9ce0b66c5b14e2db8dac624d3399986, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/1b1c10a55489451aa830442e0def28f7, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/586aa0ba38c94ce8a51abfbc35b68c0e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/ceed9f84ed9f4208a9b27e5bea685dc6, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/27eb932721674e9fa8c93c31e7a0ec90, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/d61d26b481974741bde54b0e813f143f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/7b346544fb77426b859d3020cf3bc53f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/df8a2db4f80343af84f6a3357a8f2e81, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/179b5e5c8e8c4e98ba81ffd420bcb9da, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/eee6d639d19343b287768c53def371ac, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/e5e25ee6a7954bdfa33f008ec30c5033, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/475c809f590042138c770b242540da96, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/9540fa4d9165401c9bc0ceb4c7ec8775, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/06c04cbfb6724acabd218bb07741e875, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/789f0ed603524fa5b02ff7a71eec3b6c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/024cab535c4b45f48081679b20257094, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/86de3260331e49dea3d245a999d49c5e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/13b7ef3a59b942c3846f9cc867fd1e69, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/7bcc156b7aa44737980c6c366637518f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/1befc85a318e4284af20cfb94215f0c0, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/839caeddc9554df99feb45a81b746173, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/357bf079291b44648244206f1254f9c4, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/d104ff4b99d84c54bb70a1c556b5f813, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/94c58e15b9a64706af81bb47e69ee24e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/f1d09436093f467bb175402e4d2ba3bf, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/419fe85b2121483bbb25e7cc63286718, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/939ef10933514134b408bee8b512bea6, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/b767be6318754c309159aba0f1634fc4, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/b0bf8390f2d54c128a96bb68cd997bab, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/79e973423c3a41b7aa631ebf656a8d1f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/70f2452b8f2046c28be0ac8822060975, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/768e2abefe994321bacb0137d2a7ff67, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/26eeb5e1175f4143b6b830a97e8630f0, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/95397acf3fc4416583eda6eaf43965b5, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/4ca54b3874204ee291338ff17e165366, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/8dc45561be5d474fb22f74aa85b91ff1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/b228bbbbf56e4d6fa3ba4958c0f0b703, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/019e1507c2e74285a7ce4e4c4944b02a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/a7e14400d0ae495e965dd715bea18263] to archive 2024-11-20T19:27:41,495 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T19:27:41,497 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/d9ce0b66c5b14e2db8dac624d3399986 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/d9ce0b66c5b14e2db8dac624d3399986 2024-11-20T19:27:41,498 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/1b1c10a55489451aa830442e0def28f7 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/1b1c10a55489451aa830442e0def28f7 2024-11-20T19:27:41,499 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/586aa0ba38c94ce8a51abfbc35b68c0e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/586aa0ba38c94ce8a51abfbc35b68c0e 2024-11-20T19:27:41,500 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/ceed9f84ed9f4208a9b27e5bea685dc6 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/ceed9f84ed9f4208a9b27e5bea685dc6 2024-11-20T19:27:41,500 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/27eb932721674e9fa8c93c31e7a0ec90 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/27eb932721674e9fa8c93c31e7a0ec90 2024-11-20T19:27:41,501 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/d61d26b481974741bde54b0e813f143f to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/d61d26b481974741bde54b0e813f143f 2024-11-20T19:27:41,502 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/7b346544fb77426b859d3020cf3bc53f to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/7b346544fb77426b859d3020cf3bc53f 2024-11-20T19:27:41,503 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/df8a2db4f80343af84f6a3357a8f2e81 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/df8a2db4f80343af84f6a3357a8f2e81 2024-11-20T19:27:41,504 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/179b5e5c8e8c4e98ba81ffd420bcb9da to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/179b5e5c8e8c4e98ba81ffd420bcb9da 2024-11-20T19:27:41,504 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/eee6d639d19343b287768c53def371ac to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/eee6d639d19343b287768c53def371ac 2024-11-20T19:27:41,505 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/e5e25ee6a7954bdfa33f008ec30c5033 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/e5e25ee6a7954bdfa33f008ec30c5033 2024-11-20T19:27:41,506 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/475c809f590042138c770b242540da96 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/475c809f590042138c770b242540da96 2024-11-20T19:27:41,507 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/9540fa4d9165401c9bc0ceb4c7ec8775 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/9540fa4d9165401c9bc0ceb4c7ec8775 2024-11-20T19:27:41,508 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/06c04cbfb6724acabd218bb07741e875 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/06c04cbfb6724acabd218bb07741e875 2024-11-20T19:27:41,508 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/789f0ed603524fa5b02ff7a71eec3b6c to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/789f0ed603524fa5b02ff7a71eec3b6c 2024-11-20T19:27:41,509 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/024cab535c4b45f48081679b20257094 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/024cab535c4b45f48081679b20257094 2024-11-20T19:27:41,510 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/86de3260331e49dea3d245a999d49c5e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/86de3260331e49dea3d245a999d49c5e 2024-11-20T19:27:41,511 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/13b7ef3a59b942c3846f9cc867fd1e69 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/13b7ef3a59b942c3846f9cc867fd1e69 2024-11-20T19:27:41,512 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/7bcc156b7aa44737980c6c366637518f to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/7bcc156b7aa44737980c6c366637518f 2024-11-20T19:27:41,513 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/1befc85a318e4284af20cfb94215f0c0 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/1befc85a318e4284af20cfb94215f0c0 2024-11-20T19:27:41,513 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/839caeddc9554df99feb45a81b746173 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/839caeddc9554df99feb45a81b746173 2024-11-20T19:27:41,515 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/357bf079291b44648244206f1254f9c4 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/357bf079291b44648244206f1254f9c4 2024-11-20T19:27:41,515 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/d104ff4b99d84c54bb70a1c556b5f813 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/d104ff4b99d84c54bb70a1c556b5f813 2024-11-20T19:27:41,516 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/94c58e15b9a64706af81bb47e69ee24e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/94c58e15b9a64706af81bb47e69ee24e 2024-11-20T19:27:41,516 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/f1d09436093f467bb175402e4d2ba3bf to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/f1d09436093f467bb175402e4d2ba3bf 2024-11-20T19:27:41,517 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/419fe85b2121483bbb25e7cc63286718 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/419fe85b2121483bbb25e7cc63286718 2024-11-20T19:27:41,518 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/939ef10933514134b408bee8b512bea6 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/939ef10933514134b408bee8b512bea6 2024-11-20T19:27:41,519 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/b767be6318754c309159aba0f1634fc4 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/b767be6318754c309159aba0f1634fc4 2024-11-20T19:27:41,520 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/b0bf8390f2d54c128a96bb68cd997bab to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/b0bf8390f2d54c128a96bb68cd997bab 2024-11-20T19:27:41,521 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/79e973423c3a41b7aa631ebf656a8d1f to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/79e973423c3a41b7aa631ebf656a8d1f 2024-11-20T19:27:41,522 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/70f2452b8f2046c28be0ac8822060975 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/70f2452b8f2046c28be0ac8822060975 2024-11-20T19:27:41,523 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/768e2abefe994321bacb0137d2a7ff67 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/768e2abefe994321bacb0137d2a7ff67 2024-11-20T19:27:41,524 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/26eeb5e1175f4143b6b830a97e8630f0 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/26eeb5e1175f4143b6b830a97e8630f0 2024-11-20T19:27:41,525 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/95397acf3fc4416583eda6eaf43965b5 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/95397acf3fc4416583eda6eaf43965b5 2024-11-20T19:27:41,526 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/4ca54b3874204ee291338ff17e165366 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/4ca54b3874204ee291338ff17e165366 2024-11-20T19:27:41,527 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/8dc45561be5d474fb22f74aa85b91ff1 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/8dc45561be5d474fb22f74aa85b91ff1 2024-11-20T19:27:41,528 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/b228bbbbf56e4d6fa3ba4958c0f0b703 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/b228bbbbf56e4d6fa3ba4958c0f0b703 2024-11-20T19:27:41,528 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/019e1507c2e74285a7ce4e4c4944b02a to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/019e1507c2e74285a7ce4e4c4944b02a 2024-11-20T19:27:41,529 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/a7e14400d0ae495e965dd715bea18263 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/a7e14400d0ae495e965dd715bea18263 2024-11-20T19:27:41,530 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/cb28c1a21d044b10b68854097b2ed8eb, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/8c441eb4c5404a119bd08109ef6b47a4, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/0775e61840614dfba4bbd29e4fdc8644, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/3e1f430fe67649519f7d3cadda9d9c08, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/ba2a3bfde5e14681ae0112d04e0fc492, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/fa843023fdb04876892ac94ab97410fe, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/a784675ce2fa42cfb7ee49b64f952ba6, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/33c5b2ae88894323897e8479667caf9e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/06178cbe2d024b19928bf2db35297965, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/991fa7573854436b86a4254253c469d8, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/8a21055d2cd1405c8570a2d680a57832, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/816dfeef83ed4a8496d2bda268ff5cbf, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/bf243f2a92dc4ca39b12fdc9339ef93c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/b7e55ebe355c46ee9e089ccc514f951e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/1a9f314dac574e43b6083415c0260dc9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/c25d3f509f124f4bb6b013b2bf6b8942, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/bf95fc2d27dd469fa71592c7ebf6daba, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/43f0cc7f93bd4f3d86adb3da57109494, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/d51c432d750e406da66213b8eada476b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/da1c2f37934448528a9c17452a1fe692, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/aa9c5173269a498d89ce9a567005dbfb, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/f3b9966d7f75432598c2a8198b9e9b3e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/b5991448caa44b6681ea1969ca9e1e94, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/7967bb0d1c5345a4bbe504e28de316a2, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/9cae62bfa0dd4ef8978e68103d7cd787, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/eff5dc7d837b49d5aedca4d86eea3c89, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/cea672ae52e84d5998905614404d0b06, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/920421403f8942758a390ca0311d9d23, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/5e8a319a56a648518d217cf9dd112289, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/38a2efd6c07a4efb9781c8c767123eca, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/019665f97f114cdca6e668fce5c582d7, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/c4368a96c8354f8cac49cf059d59c025, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/a5dd8e398a9e44c98860321bb21a22ef, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/5da069065b3149b697dcfce17309e89b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/3c925664b20b471b80145fe5ceeccec4, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/10e890a01f8343dea7c06b7e024f682c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/834617e1df9949e9a524d47b8b5cac51, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/e0199919377443908f93d0a497b76873, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/90d271ded8ae46a6a38b7ae1c9a867fa] to archive 2024-11-20T19:27:41,531 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T19:27:41,532 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/cb28c1a21d044b10b68854097b2ed8eb to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/cb28c1a21d044b10b68854097b2ed8eb 2024-11-20T19:27:41,533 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/8c441eb4c5404a119bd08109ef6b47a4 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/8c441eb4c5404a119bd08109ef6b47a4 2024-11-20T19:27:41,534 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/0775e61840614dfba4bbd29e4fdc8644 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/0775e61840614dfba4bbd29e4fdc8644 2024-11-20T19:27:41,534 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/3e1f430fe67649519f7d3cadda9d9c08 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/3e1f430fe67649519f7d3cadda9d9c08 2024-11-20T19:27:41,535 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/ba2a3bfde5e14681ae0112d04e0fc492 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/ba2a3bfde5e14681ae0112d04e0fc492 2024-11-20T19:27:41,536 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/fa843023fdb04876892ac94ab97410fe to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/fa843023fdb04876892ac94ab97410fe 2024-11-20T19:27:41,536 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/a784675ce2fa42cfb7ee49b64f952ba6 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/a784675ce2fa42cfb7ee49b64f952ba6 2024-11-20T19:27:41,537 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/33c5b2ae88894323897e8479667caf9e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/33c5b2ae88894323897e8479667caf9e 2024-11-20T19:27:41,538 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/06178cbe2d024b19928bf2db35297965 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/06178cbe2d024b19928bf2db35297965 2024-11-20T19:27:41,538 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/991fa7573854436b86a4254253c469d8 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/991fa7573854436b86a4254253c469d8 2024-11-20T19:27:41,539 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/8a21055d2cd1405c8570a2d680a57832 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/8a21055d2cd1405c8570a2d680a57832 2024-11-20T19:27:41,540 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/816dfeef83ed4a8496d2bda268ff5cbf to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/816dfeef83ed4a8496d2bda268ff5cbf 2024-11-20T19:27:41,541 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/bf243f2a92dc4ca39b12fdc9339ef93c to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/bf243f2a92dc4ca39b12fdc9339ef93c 2024-11-20T19:27:41,541 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/b7e55ebe355c46ee9e089ccc514f951e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/b7e55ebe355c46ee9e089ccc514f951e 2024-11-20T19:27:41,542 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/1a9f314dac574e43b6083415c0260dc9 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/1a9f314dac574e43b6083415c0260dc9 2024-11-20T19:27:41,543 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/c25d3f509f124f4bb6b013b2bf6b8942 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/c25d3f509f124f4bb6b013b2bf6b8942 2024-11-20T19:27:41,547 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/bf95fc2d27dd469fa71592c7ebf6daba to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/bf95fc2d27dd469fa71592c7ebf6daba 2024-11-20T19:27:41,548 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/43f0cc7f93bd4f3d86adb3da57109494 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/43f0cc7f93bd4f3d86adb3da57109494 2024-11-20T19:27:41,548 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/d51c432d750e406da66213b8eada476b to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/d51c432d750e406da66213b8eada476b 2024-11-20T19:27:41,549 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/da1c2f37934448528a9c17452a1fe692 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/da1c2f37934448528a9c17452a1fe692 2024-11-20T19:27:41,550 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/aa9c5173269a498d89ce9a567005dbfb to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/aa9c5173269a498d89ce9a567005dbfb 2024-11-20T19:27:41,551 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/f3b9966d7f75432598c2a8198b9e9b3e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/f3b9966d7f75432598c2a8198b9e9b3e 2024-11-20T19:27:41,552 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/b5991448caa44b6681ea1969ca9e1e94 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/b5991448caa44b6681ea1969ca9e1e94 2024-11-20T19:27:41,553 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/7967bb0d1c5345a4bbe504e28de316a2 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/7967bb0d1c5345a4bbe504e28de316a2 2024-11-20T19:27:41,553 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/9cae62bfa0dd4ef8978e68103d7cd787 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/9cae62bfa0dd4ef8978e68103d7cd787 2024-11-20T19:27:41,554 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/eff5dc7d837b49d5aedca4d86eea3c89 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/eff5dc7d837b49d5aedca4d86eea3c89 2024-11-20T19:27:41,555 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/cea672ae52e84d5998905614404d0b06 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/cea672ae52e84d5998905614404d0b06 2024-11-20T19:27:41,557 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/920421403f8942758a390ca0311d9d23 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/920421403f8942758a390ca0311d9d23 2024-11-20T19:27:41,557 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/5e8a319a56a648518d217cf9dd112289 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/5e8a319a56a648518d217cf9dd112289 2024-11-20T19:27:41,558 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/38a2efd6c07a4efb9781c8c767123eca to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/38a2efd6c07a4efb9781c8c767123eca 2024-11-20T19:27:41,560 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/019665f97f114cdca6e668fce5c582d7 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/019665f97f114cdca6e668fce5c582d7 2024-11-20T19:27:41,560 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/c4368a96c8354f8cac49cf059d59c025 to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/c4368a96c8354f8cac49cf059d59c025 2024-11-20T19:27:41,561 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/a5dd8e398a9e44c98860321bb21a22ef to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/a5dd8e398a9e44c98860321bb21a22ef 2024-11-20T19:27:41,562 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/5da069065b3149b697dcfce17309e89b to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/5da069065b3149b697dcfce17309e89b 2024-11-20T19:27:41,563 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/3c925664b20b471b80145fe5ceeccec4 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/3c925664b20b471b80145fe5ceeccec4 2024-11-20T19:27:41,564 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/10e890a01f8343dea7c06b7e024f682c to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/10e890a01f8343dea7c06b7e024f682c 2024-11-20T19:27:41,565 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/834617e1df9949e9a524d47b8b5cac51 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/834617e1df9949e9a524d47b8b5cac51 2024-11-20T19:27:41,565 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/e0199919377443908f93d0a497b76873 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/e0199919377443908f93d0a497b76873 2024-11-20T19:27:41,566 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/90d271ded8ae46a6a38b7ae1c9a867fa to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/90d271ded8ae46a6a38b7ae1c9a867fa 2024-11-20T19:27:41,567 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/7697a16a7ad2472386b41ced7dec6872, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/81ddbc842b064777a4ef0cfc3c924c2e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/dee60cebf98c4feb8486b35213351a27, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/16d74abb7e8c4d3db2a982c48b9427c3, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/58272e5e02cc496ba6d4f0ad77c7f6f8, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/072607276d44435d8805c55ec9848ba5, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/125421f46b61444ebd924be91130e944, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/dc5db976d5944f54ab6834ff40c33d1a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/2f20d7f4cb9a451b877feb68ff02e255, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/5850d9fc82d34b4b8d3727be16909d91, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/f76e1e6c1f4645d38634c7aba93cdf2e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/3f61f03e5cd34bf8a464f2bba102941b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/bda324bbfe064e7ca7fd109c1f4d9a3a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/d9281cfde3af4c9f91928068d9604d6a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/060f02b78ae34287a0b97f4e31d1bb6e, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/846e2e31566e4f278dd1239e545c4ab4, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/d983f2920d8549798fc27711cbdbd385, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/3151875afb704053b06a9e66299e607d, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/7bdb34d0c05240f487af3de5da77925e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/7680918979834ce3ad5ba2330f86f21e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/bf4ffd2b955f46088cd596fcbd9fe910, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/e0a8054621424ac893864f9acefa9a74, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/8ee310ad2f3e4f348f3692d69a04c552, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/80b92d1ef727448eb9a1f37867513375, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/5c51558a7e8a4dad847f0f941260e6c2, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/22308a37774f418d8a80b72ad0e5a617, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/e58d7974bf33424987ac7db610263a7c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/7abf532f9b4d47109d5fa82b0ff06db1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/899b8b226d2e49f6ab35b69ce9160b9c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/873ffeff8ff944c39c94e0822d6edbf5, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/d4085c634398410685ad20e59150a397, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/70b2dd43dd8541b1a8bd914f24127743, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/150cf25c8119448da06a80b44dc2dfb7, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/779da2e28af94d19bac89cf267429845, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/8196c8b5698a44b180c21c4409fe3fc5, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/25faf9fea3c64da8a2116d965d598e6a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/12a8a4608fc84eb09b7235eea5c54ff1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/feb945d8a33f4b0f94e2b21b34fdd160, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/db757947ec8747ebb05e575b1bef3f9e] to archive 2024-11-20T19:27:41,569 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T19:27:41,570 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/7697a16a7ad2472386b41ced7dec6872 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/7697a16a7ad2472386b41ced7dec6872 2024-11-20T19:27:41,571 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/81ddbc842b064777a4ef0cfc3c924c2e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/81ddbc842b064777a4ef0cfc3c924c2e 2024-11-20T19:27:41,572 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/dee60cebf98c4feb8486b35213351a27 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/dee60cebf98c4feb8486b35213351a27 2024-11-20T19:27:41,573 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/16d74abb7e8c4d3db2a982c48b9427c3 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/16d74abb7e8c4d3db2a982c48b9427c3 2024-11-20T19:27:41,574 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/58272e5e02cc496ba6d4f0ad77c7f6f8 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/58272e5e02cc496ba6d4f0ad77c7f6f8 2024-11-20T19:27:41,574 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/072607276d44435d8805c55ec9848ba5 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/072607276d44435d8805c55ec9848ba5 2024-11-20T19:27:41,575 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/125421f46b61444ebd924be91130e944 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/125421f46b61444ebd924be91130e944 2024-11-20T19:27:41,576 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/dc5db976d5944f54ab6834ff40c33d1a to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/dc5db976d5944f54ab6834ff40c33d1a 2024-11-20T19:27:41,577 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/2f20d7f4cb9a451b877feb68ff02e255 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/2f20d7f4cb9a451b877feb68ff02e255 2024-11-20T19:27:41,578 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/5850d9fc82d34b4b8d3727be16909d91 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/5850d9fc82d34b4b8d3727be16909d91 2024-11-20T19:27:41,578 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/f76e1e6c1f4645d38634c7aba93cdf2e to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/f76e1e6c1f4645d38634c7aba93cdf2e 2024-11-20T19:27:41,579 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/3f61f03e5cd34bf8a464f2bba102941b to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/3f61f03e5cd34bf8a464f2bba102941b 2024-11-20T19:27:41,580 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/bda324bbfe064e7ca7fd109c1f4d9a3a to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/bda324bbfe064e7ca7fd109c1f4d9a3a 2024-11-20T19:27:41,582 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/d9281cfde3af4c9f91928068d9604d6a to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/d9281cfde3af4c9f91928068d9604d6a 2024-11-20T19:27:41,583 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/060f02b78ae34287a0b97f4e31d1bb6e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/060f02b78ae34287a0b97f4e31d1bb6e 2024-11-20T19:27:41,584 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/846e2e31566e4f278dd1239e545c4ab4 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/846e2e31566e4f278dd1239e545c4ab4 2024-11-20T19:27:41,584 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/d983f2920d8549798fc27711cbdbd385 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/d983f2920d8549798fc27711cbdbd385 2024-11-20T19:27:41,585 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/3151875afb704053b06a9e66299e607d to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/3151875afb704053b06a9e66299e607d 2024-11-20T19:27:41,586 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/7bdb34d0c05240f487af3de5da77925e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/7bdb34d0c05240f487af3de5da77925e 2024-11-20T19:27:41,587 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/7680918979834ce3ad5ba2330f86f21e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/7680918979834ce3ad5ba2330f86f21e 2024-11-20T19:27:41,589 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/bf4ffd2b955f46088cd596fcbd9fe910 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/bf4ffd2b955f46088cd596fcbd9fe910 2024-11-20T19:27:41,589 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/e0a8054621424ac893864f9acefa9a74 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/e0a8054621424ac893864f9acefa9a74 2024-11-20T19:27:41,591 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/8ee310ad2f3e4f348f3692d69a04c552 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/8ee310ad2f3e4f348f3692d69a04c552 2024-11-20T19:27:41,591 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/80b92d1ef727448eb9a1f37867513375 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/80b92d1ef727448eb9a1f37867513375 2024-11-20T19:27:41,592 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/5c51558a7e8a4dad847f0f941260e6c2 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/5c51558a7e8a4dad847f0f941260e6c2 2024-11-20T19:27:41,593 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/22308a37774f418d8a80b72ad0e5a617 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/22308a37774f418d8a80b72ad0e5a617 2024-11-20T19:27:41,594 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/e58d7974bf33424987ac7db610263a7c to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/e58d7974bf33424987ac7db610263a7c 2024-11-20T19:27:41,595 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/7abf532f9b4d47109d5fa82b0ff06db1 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/7abf532f9b4d47109d5fa82b0ff06db1 2024-11-20T19:27:41,596 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/899b8b226d2e49f6ab35b69ce9160b9c to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/899b8b226d2e49f6ab35b69ce9160b9c 2024-11-20T19:27:41,596 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/873ffeff8ff944c39c94e0822d6edbf5 to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/873ffeff8ff944c39c94e0822d6edbf5 2024-11-20T19:27:41,597 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/d4085c634398410685ad20e59150a397 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/d4085c634398410685ad20e59150a397 2024-11-20T19:27:41,598 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/70b2dd43dd8541b1a8bd914f24127743 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/70b2dd43dd8541b1a8bd914f24127743 2024-11-20T19:27:41,599 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/150cf25c8119448da06a80b44dc2dfb7 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/150cf25c8119448da06a80b44dc2dfb7 2024-11-20T19:27:41,600 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/779da2e28af94d19bac89cf267429845 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/779da2e28af94d19bac89cf267429845 2024-11-20T19:27:41,600 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/8196c8b5698a44b180c21c4409fe3fc5 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/8196c8b5698a44b180c21c4409fe3fc5 2024-11-20T19:27:41,601 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/25faf9fea3c64da8a2116d965d598e6a to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/25faf9fea3c64da8a2116d965d598e6a 2024-11-20T19:27:41,602 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/12a8a4608fc84eb09b7235eea5c54ff1 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/12a8a4608fc84eb09b7235eea5c54ff1 2024-11-20T19:27:41,603 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/feb945d8a33f4b0f94e2b21b34fdd160 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/feb945d8a33f4b0f94e2b21b34fdd160 2024-11-20T19:27:41,604 DEBUG [StoreCloser-TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/db757947ec8747ebb05e575b1bef3f9e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/db757947ec8747ebb05e575b1bef3f9e 2024-11-20T19:27:41,607 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/recovered.edits/553.seqid, newMaxSeqId=553, maxSeqId=1 2024-11-20T19:27:41,607 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a. 
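The HFileArchiver(596) entries above all follow one pattern: on store close, each compacted store file is moved from the region's data directory to the mirrored location under archive/. A minimal sketch of that path rewrite as plain string manipulation, not the HBase API (the class and method names here are invented for illustration):

```java
// Illustrative only: mirrors the data -> archive rewrite visible in the HFileArchiver lines above.
// The class/method names and the default-namespace assumption are not HBase code.
public final class ArchivePathExample {
    static String toArchivePath(String storeFilePath) {
        // e.g. .../test-data/<uuid>/data/default/TestAcidGuarantees/<region>/<cf>/<hfile>
        //  ->  .../test-data/<uuid>/archive/data/default/TestAcidGuarantees/<region>/<cf>/<hfile>
        int idx = storeFilePath.indexOf("/data/default/");
        if (idx < 0) {
            throw new IllegalArgumentException("not a default-namespace store file: " + storeFilePath);
        }
        return storeFilePath.substring(0, idx) + "/archive" + storeFilePath.substring(idx);
    }

    public static void main(String[] args) {
        String src = "hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203"
            + "/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/db757947ec8747ebb05e575b1bef3f9e";
        System.out.println(toArchivePath(src)); // matches the destination printed by HFileArchiver(596)
    }
}
```

The same data/ -> archive/data/ mirroring appears again below when DeleteTableProcedure clears the remaining region files.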
2024-11-20T19:27:41,607 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegion(1635): Region close journal for ddf3bf4b0d5353d829b30f0de5c7c11a: 2024-11-20T19:27:41,609 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] handler.UnassignRegionHandler(170): Closed ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:41,609 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=144 updating hbase:meta row=ddf3bf4b0d5353d829b30f0de5c7c11a, regionState=CLOSED 2024-11-20T19:27:41,611 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-11-20T19:27:41,611 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; CloseRegionProcedure ddf3bf4b0d5353d829b30f0de5c7c11a, server=db9c3a6c6492,41229,1732130701496 in 1.5080 sec 2024-11-20T19:27:41,611 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=144, resume processing ppid=143 2024-11-20T19:27:41,611 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, ppid=143, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ddf3bf4b0d5353d829b30f0de5c7c11a, UNASSIGN in 1.5110 sec 2024-11-20T19:27:41,613 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-11-20T19:27:41,613 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5130 sec 2024-11-20T19:27:41,614 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130861614"}]},"ts":"1732130861614"} 2024-11-20T19:27:41,614 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T19:27:41,657 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T19:27:41,659 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6290 sec 2024-11-20T19:27:42,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T19:27:42,138 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-11-20T19:27:42,138 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T19:27:42,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:42,140 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=146, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:42,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-20T19:27:42,140 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=146, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:42,142 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:42,143 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A, FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B, FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C, FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/recovered.edits] 2024-11-20T19:27:42,145 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/937e138c5868418592c0e8bd08f3cbe6 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/937e138c5868418592c0e8bd08f3cbe6 2024-11-20T19:27:42,146 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/abc1c54c8cad4f05ba174ebbe6a41a1d to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/A/abc1c54c8cad4f05ba174ebbe6a41a1d 2024-11-20T19:27:42,148 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/5737f92a5e4f41a8b11225b69f305251 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/5737f92a5e4f41a8b11225b69f305251 2024-11-20T19:27:42,149 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/bb6c86b0f61d489e91dfb66c3353e485 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/B/bb6c86b0f61d489e91dfb66c3353e485 2024-11-20T19:27:42,151 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/29383bf9a2164250b063a10fd93152ea to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/29383bf9a2164250b063a10fd93152ea 
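During DELETE_TABLE_CLEAR_FS_LAYOUT, HFileArchiver-5 archives whatever is still under the region directory (the A, B and C families and, just below, recovered.edits) before deleting it. One way to confirm what ended up under archive/ is the standard Hadoop FileSystem API; a sketch, assuming the NameNode address and test-data path from the log are still reachable:

```java
// Sketch only: lists what HFileArchiver moved under archive/ for this region.
// The NameNode address and paths are taken from the log above; adjust for a real cluster.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedFiles {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:40371");
        try (FileSystem fs = FileSystem.get(conf)) {
            Path regionArchive = new Path("/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203"
                + "/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a");
            // One directory per column family (A, B, C) plus recovered.edits.
            for (FileStatus cf : fs.listStatus(regionArchive)) {
                for (FileStatus hfile : fs.listStatus(cf.getPath())) {
                    System.out.println(hfile.getPath() + " (" + hfile.getLen() + " bytes)");
                }
            }
        }
    }
}
```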
2024-11-20T19:27:42,152 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/8c1bfbb998704dd59a021b321c2b6eac to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/C/8c1bfbb998704dd59a021b321c2b6eac 2024-11-20T19:27:42,154 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/recovered.edits/553.seqid to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a/recovered.edits/553.seqid 2024-11-20T19:27:42,154 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/ddf3bf4b0d5353d829b30f0de5c7c11a 2024-11-20T19:27:42,154 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T19:27:42,156 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=146, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:42,157 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T19:27:42,159 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T19:27:42,160 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=146, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:42,160 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T19:27:42,160 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732130862160"}]},"ts":"9223372036854775807"} 2024-11-20T19:27:42,161 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T19:27:42,161 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => ddf3bf4b0d5353d829b30f0de5c7c11a, NAME => 'TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T19:27:42,161 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
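The MetaTableAccessor(2113) Delete entries above and just below carry ts=9223372036854775807, i.e. Long.MAX_VALUE (HConstants.LATEST_TIMESTAMP), so the tombstone covers every existing version of the row. The real cleanup runs inside the master through MetaTableAccessor; purely as a shape example, the same delete could be expressed with the public client API like this (a sketch, not the master's code path):

```java
// Illustrative client-side equivalent of the hbase:meta cleanup logged above;
// the actual deletion is performed by MetaTableAccessor inside the master.
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaRowDeleteShape {
    public static void main(String[] args) throws Exception {
        String regionRow = "TestAcidGuarantees,,1732130829250.ddf3bf4b0d5353d829b30f0de5c7c11a.";
        try (Connection conn = ConnectionFactory.createConnection();
             Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
            // ts=9223372036854775807 in the log is HConstants.LATEST_TIMESTAMP (Long.MAX_VALUE),
            // so the delete covers all existing versions of the 'info' family for that region row.
            Delete d = new Delete(Bytes.toBytes(regionRow), HConstants.LATEST_TIMESTAMP);
            d.addFamily(Bytes.toBytes(HConstants.CATALOG_FAMILY_STR));
            meta.delete(d);
        }
    }
}
```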
2024-11-20T19:27:42,161 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732130862161"}]},"ts":"9223372036854775807"} 2024-11-20T19:27:42,162 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T19:27:42,199 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=146, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:42,200 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 61 msec 2024-11-20T19:27:42,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-20T19:27:42,241 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-11-20T19:27:42,251 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=237 (was 238), OpenFileDescriptor=447 (was 456), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=665 (was 662) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3199 (was 3232) 2024-11-20T19:27:42,260 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=237, OpenFileDescriptor=447, MaxFileDescriptor=1048576, SystemLoadAverage=665, ProcessCount=11, AvailableMemoryMB=3198 2024-11-20T19:27:42,261 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
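From the test driver's point of view, the DISABLE (procId 142) and DELETE (procId 146) operations that just completed are two Admin calls. A minimal client-side sketch; the connection setup is an assumption, since in the test it comes from the mini-cluster's Configuration:

```java
// Minimal sketch of the client side of the DISABLE/DELETE operations logged above.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
    public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
            if (admin.tableExists(tn)) {
                if (admin.isTableEnabled(tn)) {
                    admin.disableTable(tn);   // DisableTableProcedure, pid=142 in the log
                }
                admin.deleteTable(tn);        // DeleteTableProcedure, pid=146 in the log
            }
        }
    }
}
```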
2024-11-20T19:27:42,262 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T19:27:42,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=147, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:42,263 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T19:27:42,263 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:42,263 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 147 2024-11-20T19:27:42,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-11-20T19:27:42,264 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T19:27:42,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742444_1620 (size=960) 2024-11-20T19:27:42,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-11-20T19:27:42,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-11-20T19:27:42,670 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203 2024-11-20T19:27:42,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742445_1621 (size=53) 2024-11-20T19:27:42,674 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:27:42,674 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 6e8af0e10da4be5fa330b00646bb6e13, disabling compactions & flushes 2024-11-20T19:27:42,674 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:42,674 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:42,675 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. after waiting 0 ms 2024-11-20T19:27:42,675 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:42,675 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:42,675 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:42,675 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T19:27:42,676 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732130862675"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732130862675"}]},"ts":"1732130862675"} 2024-11-20T19:27:42,676 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
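The CREATE for testMobGetAtomicity uses the descriptor printed in the HMaster$4(2389) line above: families A, B and C with VERSIONS => '1' and defaults otherwise, plus the table attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC'. A hedged sketch of building an equivalent descriptor with the 2.x client API; whether the 131072-byte flush size flagged by TableDescriptorChecker is set on the descriptor or via hbase.hregion.memstore.flush.size in the test configuration is an assumption here:

```java
// Sketch of an equivalent CREATE descriptor (families A/B/C, VERSIONS=1, BASIC compacting memstore).
// Setting the 131072-byte flush size on the descriptor is an assumption; the test may set it in config.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAcidGuaranteesTable {
    public static void main(String[] args) throws Exception {
        TableDescriptorBuilder tdb = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setValue("hbase.hregion.compacting.memstore.type", "BASIC")
            .setMemStoreFlushSize(131072L);
        for (String family : new String[] { "A", "B", "C" }) {
            tdb.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)
                .build());
        }
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
            admin.createTable(tdb.build());   // CreateTableProcedure, pid=147 in the log
        }
    }
}
```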
2024-11-20T19:27:42,677 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T19:27:42,677 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130862677"}]},"ts":"1732130862677"} 2024-11-20T19:27:42,678 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T19:27:42,724 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6e8af0e10da4be5fa330b00646bb6e13, ASSIGN}] 2024-11-20T19:27:42,725 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6e8af0e10da4be5fa330b00646bb6e13, ASSIGN 2024-11-20T19:27:42,725 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=6e8af0e10da4be5fa330b00646bb6e13, ASSIGN; state=OFFLINE, location=db9c3a6c6492,41229,1732130701496; forceNewPlan=false, retain=false 2024-11-20T19:27:42,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-11-20T19:27:42,876 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=6e8af0e10da4be5fa330b00646bb6e13, regionState=OPENING, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:42,877 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; OpenRegionProcedure 6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496}] 2024-11-20T19:27:43,028 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:43,031 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
2024-11-20T19:27:43,031 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(7285): Opening region: {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} 2024-11-20T19:27:43,031 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:43,031 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:27:43,031 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(7327): checking encryption for 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:43,032 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(7330): checking classloading for 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:43,033 INFO [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:43,034 INFO [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:27:43,034 INFO [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e8af0e10da4be5fa330b00646bb6e13 columnFamilyName A 2024-11-20T19:27:43,034 DEBUG [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:43,034 INFO [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] regionserver.HStore(327): Store=6e8af0e10da4be5fa330b00646bb6e13/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:27:43,034 INFO [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:43,035 INFO [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:27:43,035 INFO [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e8af0e10da4be5fa330b00646bb6e13 columnFamilyName B 2024-11-20T19:27:43,036 DEBUG [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:43,036 INFO [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] regionserver.HStore(327): Store=6e8af0e10da4be5fa330b00646bb6e13/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:27:43,036 INFO [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:43,037 INFO [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:27:43,037 INFO [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e8af0e10da4be5fa330b00646bb6e13 columnFamilyName C 2024-11-20T19:27:43,037 DEBUG [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:43,037 INFO [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] regionserver.HStore(327): Store=6e8af0e10da4be5fa330b00646bb6e13/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:27:43,037 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:43,038 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:43,038 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:43,039 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T19:27:43,040 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(1085): writing seq id for 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:43,042 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T19:27:43,042 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(1102): Opened 6e8af0e10da4be5fa330b00646bb6e13; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64242005, jitterRate=-0.0427195280790329}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T19:27:43,043 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(1001): Region open journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:43,043 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., pid=149, masterSystemTime=1732130863028 2024-11-20T19:27:43,044 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:43,044 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
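The HRegion(1102) line above reports ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64242005, jitterRate=-0.0427195280790329}. Those two values are consistent with a 64 MB (67,108,864-byte) base split size with the jitter applied multiplicatively; the base size is inferred from the arithmetic, not printed in the log. A quick check:

```java
// Sanity check of the jittered split size printed by ConstantSizeRegionSplitPolicy above.
// Only desiredMaxFileSize and jitterRate appear in the log; the 64 MB base is inferred.
public class SplitSizeJitterCheck {
    public static void main(String[] args) {
        long base = 64L * 1024 * 1024;                 // 67108864, assumed base split size for the test
        double jitterRate = -0.0427195280790329;       // from the log line above
        long desired = (long) (base * (1 + jitterRate));
        System.out.println(desired);                   // 64242005, matching desiredMaxFileSize in the log
    }
}
```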
2024-11-20T19:27:43,045 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=6e8af0e10da4be5fa330b00646bb6e13, regionState=OPEN, openSeqNum=2, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:43,046 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-11-20T19:27:43,047 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; OpenRegionProcedure 6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 in 168 msec 2024-11-20T19:27:43,048 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=148, resume processing ppid=147 2024-11-20T19:27:43,048 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6e8af0e10da4be5fa330b00646bb6e13, ASSIGN in 323 msec 2024-11-20T19:27:43,048 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T19:27:43,048 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130863048"}]},"ts":"1732130863048"} 2024-11-20T19:27:43,049 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T19:27:43,058 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T19:27:43,059 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 796 msec 2024-11-20T19:27:43,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-11-20T19:27:43,367 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 147 completed 2024-11-20T19:27:43,368 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x635b1751 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@593af048 2024-11-20T19:27:43,424 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cbd2497, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:43,425 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:43,426 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46534, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:43,426 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T19:27:43,427 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55522, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T19:27:43,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T19:27:43,429 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T19:27:43,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=150, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:43,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742446_1622 (size=996) 2024-11-20T19:27:43,839 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-20T19:27:43,839 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-20T19:27:43,840 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T19:27:43,841 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6e8af0e10da4be5fa330b00646bb6e13, REOPEN/MOVE}] 2024-11-20T19:27:43,842 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6e8af0e10da4be5fa330b00646bb6e13, REOPEN/MOVE 2024-11-20T19:27:43,842 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=6e8af0e10da4be5fa330b00646bb6e13, regionState=CLOSING, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:43,843 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T19:27:43,843 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; CloseRegionProcedure 6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496}] 2024-11-20T19:27:43,994 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:43,995 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(124): Close 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:43,995 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T19:27:43,995 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1681): Closing 6e8af0e10da4be5fa330b00646bb6e13, disabling compactions & flushes 2024-11-20T19:27:43,995 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:43,995 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:43,995 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. after waiting 0 ms 2024-11-20T19:27:43,995 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
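
The modify-table entry above changes family 'A' from a plain family to a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4') and then drives a ReopenTableRegionsProcedure over the single region. A hedged sketch of issuing an equivalent schema change from a client follows; it assumes the table already exists, and the 4-byte MOB threshold simply mirrors the value in the logged descriptor.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyA {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Fetch the current descriptor and rewrite only family 'A'.
      TableDescriptor current = admin.getDescriptor(table);
      ColumnFamilyDescriptor a = current.getColumnFamily(Bytes.toBytes("A"));
      ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(a)
          .setMobEnabled(true)  // IS_MOB => 'true' in the logged descriptor
          .setMobThreshold(4L)  // MOB_THRESHOLD => '4': cells above 4 bytes go to MOB files
          .build();
      TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
          .modifyColumnFamily(mobA)
          .build();
      // modifyTable drives the ModifyTableProcedure / region reopen seen in the log.
      admin.modifyTable(modified);
    }
  }
}
```
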
2024-11-20T19:27:43,998 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-20T19:27:43,998 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:43,998 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1635): Region close journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:43,998 WARN [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegionServer(3786): Not adding moved region record: 6e8af0e10da4be5fa330b00646bb6e13 to self. 2024-11-20T19:27:43,999 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(170): Closed 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:44,000 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=6e8af0e10da4be5fa330b00646bb6e13, regionState=CLOSED 2024-11-20T19:27:44,002 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-11-20T19:27:44,002 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; CloseRegionProcedure 6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 in 157 msec 2024-11-20T19:27:44,002 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=6e8af0e10da4be5fa330b00646bb6e13, REOPEN/MOVE; state=CLOSED, location=db9c3a6c6492,41229,1732130701496; forceNewPlan=false, retain=true 2024-11-20T19:27:44,152 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=6e8af0e10da4be5fa330b00646bb6e13, regionState=OPENING, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:44,153 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=152, state=RUNNABLE; OpenRegionProcedure 6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496}] 2024-11-20T19:27:44,304 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:44,307 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
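
After the CloseRegionProcedure above, the region is reassigned to the same server (retain=true) and reopened with a new openSeqNum. A client does not have to track this itself, but a test can force-refresh its cached region location once the reopen completes. The sketch below is illustrative only; the row key is one of the keys used elsewhere in this test, and the single-region layout (empty start/end key) matches the logged region.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class RefreshRegionLocation {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         RegionLocator locator =
             connection.getRegionLocator(TableName.valueOf("TestAcidGuarantees"))) {
      // reload=true bypasses the client-side cache, so a reopen/move is picked up immediately.
      HRegionLocation location = locator.getRegionLocation(Bytes.toBytes("test_row_0"), true);
      System.out.println("region " + location.getRegion().getEncodedName()
          + " is on " + location.getServerName());
    }
  }
}
```
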
2024-11-20T19:27:44,307 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7285): Opening region: {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} 2024-11-20T19:27:44,307 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:44,307 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:27:44,307 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7327): checking encryption for 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:44,307 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7330): checking classloading for 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:44,308 INFO [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:44,309 INFO [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:27:44,309 INFO [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e8af0e10da4be5fa330b00646bb6e13 columnFamilyName A 2024-11-20T19:27:44,310 DEBUG [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:44,310 INFO [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] regionserver.HStore(327): Store=6e8af0e10da4be5fa330b00646bb6e13/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:27:44,310 INFO [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:44,311 INFO [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:27:44,311 INFO [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e8af0e10da4be5fa330b00646bb6e13 columnFamilyName B 2024-11-20T19:27:44,311 DEBUG [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:44,311 INFO [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] regionserver.HStore(327): Store=6e8af0e10da4be5fa330b00646bb6e13/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:27:44,311 INFO [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:44,312 INFO [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:27:44,312 INFO [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e8af0e10da4be5fa330b00646bb6e13 columnFamilyName C 2024-11-20T19:27:44,312 DEBUG [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:44,312 INFO [StoreOpener-6e8af0e10da4be5fa330b00646bb6e13-1 {}] regionserver.HStore(327): Store=6e8af0e10da4be5fa330b00646bb6e13/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:27:44,312 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:44,313 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:44,314 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:44,315 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T19:27:44,317 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1085): writing seq id for 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:44,317 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1102): Opened 6e8af0e10da4be5fa330b00646bb6e13; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61209540, jitterRate=-0.08790677785873413}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T19:27:44,318 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1001): Region open journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:44,319 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., pid=154, masterSystemTime=1732130864304 2024-11-20T19:27:44,320 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:44,320 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
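
Once the reopened region comes back (next sequenceid=5 in the entries above), the modified schema is in effect. A short, hedged sketch of how a client could confirm that the new descriptor took hold; it only reads the descriptor back and prints the MOB settings expected from the logged modification.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class VerifyMobDescriptor {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      ColumnFamilyDescriptor a = admin.getDescriptor(TableName.valueOf("TestAcidGuarantees"))
          .getColumnFamily(Bytes.toBytes("A"));
      // Expect IS_MOB => 'true' and MOB_THRESHOLD => '4' after the ModifyTableProcedure completes.
      System.out.println("A isMobEnabled=" + a.isMobEnabled()
          + " mobThreshold=" + a.getMobThreshold());
    }
  }
}
```
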
2024-11-20T19:27:44,320 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=6e8af0e10da4be5fa330b00646bb6e13, regionState=OPEN, openSeqNum=5, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:44,322 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=152 2024-11-20T19:27:44,323 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=152, state=SUCCESS; OpenRegionProcedure 6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 in 168 msec 2024-11-20T19:27:44,324 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=151 2024-11-20T19:27:44,324 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=151, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6e8af0e10da4be5fa330b00646bb6e13, REOPEN/MOVE in 481 msec 2024-11-20T19:27:44,325 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-11-20T19:27:44,325 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 484 msec 2024-11-20T19:27:44,326 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 896 msec 2024-11-20T19:27:44,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-20T19:27:44,328 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2cbfd84f to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2209c520 2024-11-20T19:27:44,366 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5765d46a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:44,367 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3fb684eb to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@537a66f8 2024-11-20T19:27:44,374 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ac53e79, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:44,375 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0644b7e6 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6094c70 2024-11-20T19:27:44,390 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bc9c3e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:44,391 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5c9b5141 
to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@103dfc6e 2024-11-20T19:27:44,399 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7181df3b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:44,400 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11a52cdf to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e047c09 2024-11-20T19:27:44,407 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11030ef5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:44,408 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x60d631a3 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@69abefea 2024-11-20T19:27:44,416 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b914bf4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:44,417 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x58971172 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e757135 2024-11-20T19:27:44,424 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f6a59e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:44,425 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d7fe93b to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7846cb78 2024-11-20T19:27:44,432 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@150e08ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:44,433 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11c440f7 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5f1754bc 2024-11-20T19:27:44,441 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a3b66d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:44,441 DEBUG [Time-limited 
test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x58460ef3 to 127.0.0.1:49985 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d9113f3 2024-11-20T19:27:44,449 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cfdf76c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:44,455 DEBUG [hconnection-0x6293115a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:44,455 DEBUG [hconnection-0x43c82564-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:44,456 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46538, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:44,456 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46540, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:44,459 DEBUG [hconnection-0x495223c5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:44,460 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46544, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:44,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:44,467 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e8af0e10da4be5fa330b00646bb6e13 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:27:44,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=A 2024-11-20T19:27:44,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:44,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=B 2024-11-20T19:27:44,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:44,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=C 2024-11-20T19:27:44,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:44,472 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:44,472 DEBUG [hconnection-0x4caa7773-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:44,472 DEBUG [hconnection-0x239db8d4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:44,473 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:46546, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:44,474 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46548, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:44,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees 2024-11-20T19:27:44,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-20T19:27:44,476 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:44,477 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:44,477 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:44,479 DEBUG [hconnection-0x5563d41a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:44,486 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46566, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:44,489 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,489 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130924488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:44,490 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130924488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:44,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46538 deadline: 1732130924489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:44,495 DEBUG [hconnection-0x5991a338-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:44,496 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46576, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:44,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46576 deadline: 1732130924498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:44,500 DEBUG [hconnection-0x4b7a25d2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:44,501 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46578, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:44,505 DEBUG [hconnection-0x4c817ff4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:44,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46578 deadline: 1732130924506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:44,507 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46584, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:44,510 DEBUG [hconnection-0x349795dc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:44,513 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46596, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:44,529 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200f4504b0f3b848e893700933a8ddbee3_6e8af0e10da4be5fa330b00646bb6e13 is 50, key is test_row_0/A:col10/1732130864464/Put/seqid=0 2024-11-20T19:27:44,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742447_1623 (size=12154) 2024-11-20T19:27:44,571 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:44,575 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200f4504b0f3b848e893700933a8ddbee3_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200f4504b0f3b848e893700933a8ddbee3_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:44,576 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/089425f42ffc466eb41795981ab7ae0c, store: [table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:44,576 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/089425f42ffc466eb41795981ab7ae0c is 175, key is test_row_0/A:col10/1732130864464/Put/seqid=0 2024-11-20T19:27:44,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-20T19:27:44,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742448_1624 (size=30955) 2024-11-20T19:27:44,589 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/089425f42ffc466eb41795981ab7ae0c 2024-11-20T19:27:44,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130924591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:44,593 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130924592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:44,593 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46538 deadline: 1732130924592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:44,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46576 deadline: 1732130924600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:44,618 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/da531366f7124c6387ed2c666ae17a85 is 50, key is test_row_0/B:col10/1732130864464/Put/seqid=0 2024-11-20T19:27:44,621 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46578 deadline: 1732130924618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:44,628 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:44,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T19:27:44,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:44,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:44,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:44,629 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:44,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:44,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:44,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742449_1625 (size=12001) 2024-11-20T19:27:44,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-20T19:27:44,781 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:44,781 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T19:27:44,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:44,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:44,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:44,781 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:44,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:44,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:44,794 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130924793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:44,795 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46538 deadline: 1732130924794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:44,797 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130924795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:44,805 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46576 deadline: 1732130924804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:44,824 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46578 deadline: 1732130924823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:44,934 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:44,934 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T19:27:44,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:44,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:44,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:44,934 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:44,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:44,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:45,064 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/da531366f7124c6387ed2c666ae17a85 2024-11-20T19:27:45,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-20T19:27:45,086 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:45,087 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T19:27:45,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:45,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:45,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:45,087 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:45,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:45,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:45,097 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130925096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:45,098 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46538 deadline: 1732130925097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:45,099 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/d9534d62116c4a08912fc5a13c3975f7 is 50, key is test_row_0/C:col10/1732130864464/Put/seqid=0 2024-11-20T19:27:45,101 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130925100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:45,108 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46576 deadline: 1732130925108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:45,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46578 deadline: 1732130925126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:45,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742450_1626 (size=12001) 2024-11-20T19:27:45,239 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:45,240 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T19:27:45,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:45,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:45,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:45,240 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:45,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:45,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:45,392 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:45,393 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T19:27:45,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:45,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:45,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:45,393 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:45,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:45,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:45,530 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/d9534d62116c4a08912fc5a13c3975f7 2024-11-20T19:27:45,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/089425f42ffc466eb41795981ab7ae0c as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/089425f42ffc466eb41795981ab7ae0c 2024-11-20T19:27:45,539 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/089425f42ffc466eb41795981ab7ae0c, entries=150, sequenceid=16, filesize=30.2 K 2024-11-20T19:27:45,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/da531366f7124c6387ed2c666ae17a85 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/da531366f7124c6387ed2c666ae17a85 2024-11-20T19:27:45,544 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/da531366f7124c6387ed2c666ae17a85, entries=150, sequenceid=16, filesize=11.7 K 2024-11-20T19:27:45,545 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:45,545 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T19:27:45,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:45,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:45,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
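Note: the HRegionFileSystem(442) and HStore$StoreFlusherImpl(1989) entries above show the two-step flush layout: each column family's snapshot is first written under the region's .tmp directory and only then moved into the family directory (A, B, C), at which point it is logged as "Added" with its entry count, sequence id and file size. A minimal sketch of that commit-by-rename step, assuming plain Hadoop FileSystem semantics (paths are placeholders; this is not the HRegionFileSystem code):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class CommitByRenameSketch {
        /** Moves a flushed HFile from the region's .tmp area into its column-family directory. */
        public static Path commitStoreFile(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
            Path dst = new Path(familyDir, tmpFile.getName());
            // The new HFile only becomes visible to the store once it sits in the family
            // directory; until the rename, readers keep using the existing files.
            if (!fs.rename(tmpFile, dst)) {
                throw new IOException("Failed to commit " + tmpFile + " as " + dst);
            }
            return dst;
        }
    }

Once all three families are committed, the flusher logs the "Finished flush" summary seen a few entries below.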
2024-11-20T19:27:45,546 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:45,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:45,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/d9534d62116c4a08912fc5a13c3975f7 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/d9534d62116c4a08912fc5a13c3975f7 2024-11-20T19:27:45,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
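Note: on the master side, each failed report surfaces as HMaster(4114) "Remote procedure failed, pid=156", after which RSProcedureDispatcher opens a new admin connection and re-sends the same FlushRegionCallable; the timestamps show this repeating roughly every 150 ms until the in-progress flush completes. A deliberately generic sketch of that "retry until the remote call stops failing" loop (the fixed delay and the RemoteCall interface are assumptions, not the dispatcher's actual scheduling policy):

    import java.io.IOException;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public final class RedispatchSketch {
        /** Something that may fail transiently, e.g. a remote flush request. */
        interface RemoteCall { void run() throws IOException; }

        /** Schedules the call and keeps rescheduling it after each failure. */
        public static void retryUntilSuccess(ScheduledExecutorService pool, RemoteCall call, long delayMs) {
            pool.schedule(() -> {
                try {
                    call.run();                              // succeeded: stop rescheduling
                } catch (IOException e) {
                    retryUntilSuccess(pool, call, delayMs);  // failed: try again after the delay
                }
            }, delayMs, TimeUnit.MILLISECONDS);
        }
    }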
2024-11-20T19:27:45,551 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/d9534d62116c4a08912fc5a13c3975f7, entries=150, sequenceid=16, filesize=11.7 K 2024-11-20T19:27:45,552 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 6e8af0e10da4be5fa330b00646bb6e13 in 1084ms, sequenceid=16, compaction requested=false 2024-11-20T19:27:45,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:45,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-20T19:27:45,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:45,600 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e8af0e10da4be5fa330b00646bb6e13 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:27:45,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=A 2024-11-20T19:27:45,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:45,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=B 2024-11-20T19:27:45,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:45,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=C 2024-11-20T19:27:45,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:45,613 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46538 deadline: 1732130925609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:45,616 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46576 deadline: 1732130925613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:45,616 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130925613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:45,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130925613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:45,624 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205837e1f1343f4683bd929f5e55a977a0_6e8af0e10da4be5fa330b00646bb6e13 is 50, key is test_row_0/A:col10/1732130865599/Put/seqid=0 2024-11-20T19:27:45,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46578 deadline: 1732130925628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:45,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742451_1627 (size=12154) 2024-11-20T19:27:45,697 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:45,698 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T19:27:45,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:45,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:45,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:45,698 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
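Note: the RegionTooBusyException warnings above come from HRegion.checkResources, which rejects writes once a region's memstore exceeds its blocking limit; that limit is the per-region flush threshold multiplied by hbase.hregion.memstore.block.multiplier, and the 512.0 K figure indicates this test runs with a much smaller hbase.hregion.memstore.flush.size than the default so that flushes and write back-pressure trigger quickly (the exact values used here are an assumption). A hedged sketch of the two properties involved:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class MemstoreLimitSketch {
        /** Assumed values that would reproduce a 512 K blocking limit; only the property names are standard. */
        public static Configuration smallMemstoreConf() {
            Configuration conf = HBaseConfiguration.create();
            // Per-region memstore size at which a flush is requested.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
            // Writes are rejected with RegionTooBusyException once the memstore passes
            // flush.size * block.multiplier (128 K * 4 = 512 K).
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            return conf;
        }
    }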
2024-11-20T19:27:45,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:45,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:45,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46538 deadline: 1732130925714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:45,719 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130925718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:45,720 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130925718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:45,850 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:45,850 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T19:27:45,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
2024-11-20T19:27:45,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:45,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:45,850 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:45,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
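Note: while the region is over its blocking limit, every Mutate above is bounced back to the writer threads as RegionTooBusyException, and they simply retry once a flush frees memstore space. A hedged client-side sketch of that back-off pattern (table, row, family and value are placeholders borrowed from the test's naming; depending on client retry settings the exception may arrive wrapped in a retries-exhausted exception rather than directly as shown here):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class BackoffPutExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;
                for (int attempt = 0; attempt < 5; attempt++) {
                    try {
                        table.put(put);
                        break;                       // write accepted
                    } catch (RegionTooBusyException busy) {
                        // The region's memstore is over its blocking limit; wait for a
                        // flush to free space before trying again.
                        Thread.sleep(backoffMs);
                        backoffMs *= 2;
                    }
                }
            }
        }
    }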
2024-11-20T19:27:45,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:45,921 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46538 deadline: 1732130925919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:45,923 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130925921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:45,923 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130925922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:46,002 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:46,003 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T19:27:46,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:46,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:46,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:46,003 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:46,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:46,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:46,058 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:46,063 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205837e1f1343f4683bd929f5e55a977a0_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205837e1f1343f4683bd929f5e55a977a0_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:46,063 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/1fa601a545a349de9442bb1a1a58be6a, store: [table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:46,064 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/1fa601a545a349de9442bb1a1a58be6a is 175, key is test_row_0/A:col10/1732130865599/Put/seqid=0 2024-11-20T19:27:46,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742452_1628 (size=30955) 2024-11-20T19:27:46,092 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/1fa601a545a349de9442bb1a1a58be6a 2024-11-20T19:27:46,108 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/a978658b0801416599a2a3c872ae1e98 is 50, key is test_row_0/B:col10/1732130865599/Put/seqid=0 2024-11-20T19:27:46,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742453_1629 (size=12001) 2024-11-20T19:27:46,145 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), 
to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/a978658b0801416599a2a3c872ae1e98 2024-11-20T19:27:46,156 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/f25343e315e242f89c25265b00c01e2b is 50, key is test_row_0/C:col10/1732130865599/Put/seqid=0 2024-11-20T19:27:46,157 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:46,157 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T19:27:46,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:46,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:46,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:46,158 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:46,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:46,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
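Note: family A of this region is MOB-enabled, which is why its flushes go through DefaultMobStoreFlusher: the cell values are written to a MOB file that HMobStore renames from mobdir/.tmp into mobdir/data (the files above), while the regular HFile under the region's .tmp/A directory keeps only references and is then committed alongside the B and C files below. A hedged sketch of how such a family could be declared (the threshold value and the exact setup of this test are assumptions; only the builder API is standard):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MobFamilyExample {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                        .setMobEnabled(true)   // values above the threshold go to mobdir/...
                        .setMobThreshold(4L)   // assumption: tiny threshold so the ~50-byte cells qualify
                        .build())
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
                    .build());
            }
        }
    }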
2024-11-20T19:27:46,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742454_1630 (size=12001) 2024-11-20T19:27:46,193 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/f25343e315e242f89c25265b00c01e2b 2024-11-20T19:27:46,201 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/1fa601a545a349de9442bb1a1a58be6a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/1fa601a545a349de9442bb1a1a58be6a 2024-11-20T19:27:46,207 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/1fa601a545a349de9442bb1a1a58be6a, entries=150, sequenceid=41, filesize=30.2 K 2024-11-20T19:27:46,209 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/a978658b0801416599a2a3c872ae1e98 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/a978658b0801416599a2a3c872ae1e98 2024-11-20T19:27:46,213 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/a978658b0801416599a2a3c872ae1e98, entries=150, sequenceid=41, filesize=11.7 K 2024-11-20T19:27:46,214 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/f25343e315e242f89c25265b00c01e2b as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/f25343e315e242f89c25265b00c01e2b 2024-11-20T19:27:46,225 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/f25343e315e242f89c25265b00c01e2b, entries=150, sequenceid=41, filesize=11.7 K 2024-11-20T19:27:46,225 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 6e8af0e10da4be5fa330b00646bb6e13 in 625ms, sequenceid=41, compaction requested=false 2024-11-20T19:27:46,225 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:46,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:46,229 
INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e8af0e10da4be5fa330b00646bb6e13 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T19:27:46,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=A 2024-11-20T19:27:46,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:46,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=B 2024-11-20T19:27:46,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:46,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=C 2024-11-20T19:27:46,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:46,240 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112082bba7336f704f23a8b07d0f8723c71e_6e8af0e10da4be5fa330b00646bb6e13 is 50, key is test_row_0/A:col10/1732130865611/Put/seqid=0 2024-11-20T19:27:46,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742455_1631 (size=12154) 2024-11-20T19:27:46,249 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:46,256 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112082bba7336f704f23a8b07d0f8723c71e_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112082bba7336f704f23a8b07d0f8723c71e_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:46,257 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/6d8f921f66a944c7bc5b5056aad92129, store: [table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:46,257 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/6d8f921f66a944c7bc5b5056aad92129 is 175, key is test_row_0/A:col10/1732130865611/Put/seqid=0 2024-11-20T19:27:46,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130926269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:46,276 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130926272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:46,277 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46538 deadline: 1732130926273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:46,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742456_1632 (size=30955) 2024-11-20T19:27:46,295 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/6d8f921f66a944c7bc5b5056aad92129 2024-11-20T19:27:46,303 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/eb57aef32ced4c04b0ad5a8871dcc628 is 50, key is test_row_0/B:col10/1732130865611/Put/seqid=0 2024-11-20T19:27:46,310 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:46,311 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T19:27:46,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:46,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
as already flushing 2024-11-20T19:27:46,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:46,312 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:46,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:46,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:46,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742457_1633 (size=12001) 2024-11-20T19:27:46,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130926375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:46,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130926377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:46,380 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46538 deadline: 1732130926379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:46,464 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:46,464 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T19:27:46,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:46,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:46,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:46,464 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:46,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:46,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:46,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-20T19:27:46,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130926579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:46,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130926581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:46,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46538 deadline: 1732130926582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:46,616 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:46,616 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T19:27:46,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:46,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:46,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:46,617 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:46,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:46,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:46,622 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46576 deadline: 1732130926620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:46,636 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46578 deadline: 1732130926633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:46,752 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/eb57aef32ced4c04b0ad5a8871dcc628 2024-11-20T19:27:46,758 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/924ae7ab8ca044f9ae88f95f134bf66b is 50, key is test_row_0/C:col10/1732130865611/Put/seqid=0 2024-11-20T19:27:46,769 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:46,770 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T19:27:46,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:46,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:46,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:46,770 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:46,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:46,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:46,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742458_1634 (size=12001) 2024-11-20T19:27:46,888 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130926886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:46,889 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46538 deadline: 1732130926886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:46,889 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130926886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:46,922 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:46,923 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T19:27:46,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:46,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:46,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:46,923 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:46,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:46,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:47,075 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:47,075 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T19:27:47,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:47,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:47,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:47,076 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:47,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:47,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
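The repeated RegionTooBusyException stack traces above show the region rejecting client mutations while its memstore sits over the 512.0 K limit, and the requested flush failing because one is already in flight. A minimal client-side sketch of how a writer might back off and retry such a put is shown below; the table, row, family and qualifier names mirror the test, but the retry count, backoff values and cell value are illustrative assumptions rather than anything taken from this log.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;                     // illustrative starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                       // the HBase client also retries internally
          break;
        } catch (IOException e) {
          // A RegionTooBusyException (possibly wrapped by the client's own retry
          // machinery) means the region is blocking writes until a flush completes;
          // back off and try again instead of failing immediately.
          if (attempt == 5) {
            throw e;
          }
          Thread.sleep(backoffMs);
          backoffMs *= 2;                       // simple exponential backoff
        }
      }
    }
  }
}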
2024-11-20T19:27:47,137 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T19:27:47,199 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/924ae7ab8ca044f9ae88f95f134bf66b 2024-11-20T19:27:47,205 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/6d8f921f66a944c7bc5b5056aad92129 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/6d8f921f66a944c7bc5b5056aad92129 2024-11-20T19:27:47,209 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/6d8f921f66a944c7bc5b5056aad92129, entries=150, sequenceid=55, filesize=30.2 K 2024-11-20T19:27:47,210 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/eb57aef32ced4c04b0ad5a8871dcc628 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/eb57aef32ced4c04b0ad5a8871dcc628 2024-11-20T19:27:47,215 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/eb57aef32ced4c04b0ad5a8871dcc628, entries=150, sequenceid=55, filesize=11.7 K 2024-11-20T19:27:47,216 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/924ae7ab8ca044f9ae88f95f134bf66b as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/924ae7ab8ca044f9ae88f95f134bf66b 2024-11-20T19:27:47,221 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/924ae7ab8ca044f9ae88f95f134bf66b, entries=150, sequenceid=55, filesize=11.7 K 2024-11-20T19:27:47,222 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 6e8af0e10da4be5fa330b00646bb6e13 in 992ms, sequenceid=55, compaction requested=true 2024-11-20T19:27:47,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:47,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e8af0e10da4be5fa330b00646bb6e13:A, priority=-2147483648, current under compaction store size 
is 1 2024-11-20T19:27:47,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:47,222 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:47,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e8af0e10da4be5fa330b00646bb6e13:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:47,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:47,222 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:47,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e8af0e10da4be5fa330b00646bb6e13:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:47,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:47,223 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:47,223 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 6e8af0e10da4be5fa330b00646bb6e13/A is initiating minor compaction (all files) 2024-11-20T19:27:47,223 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e8af0e10da4be5fa330b00646bb6e13/A in TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:47,223 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/089425f42ffc466eb41795981ab7ae0c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/1fa601a545a349de9442bb1a1a58be6a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/6d8f921f66a944c7bc5b5056aad92129] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp, totalSize=90.7 K 2024-11-20T19:27:47,223 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
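The "Over memstore limit=512.0 K" warnings, and the flush above that finally brings the memstore back down, follow from the region's blocking threshold, which HBase derives from the configured memstore flush size times a block multiplier. The sketch below shows that arithmetic with hypothetical test-sized values chosen to reproduce a 512 K limit; the values actually used by this run are not visible in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Hypothetical test-sized values; the production defaults are 128 MB and 4.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // 128 K
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);

    // checkResources() starts rejecting writes with RegionTooBusyException once
    // the memstore exceeds flushSize * multiplier, which is 512 K with the
    // values above and matches the "Over memstore limit=512.0 K" messages here.
    long blockingLimit = flushSize * multiplier;
    System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
  }
}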
2024-11-20T19:27:47,223 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:47,223 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. files: [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/089425f42ffc466eb41795981ab7ae0c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/1fa601a545a349de9442bb1a1a58be6a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/6d8f921f66a944c7bc5b5056aad92129] 2024-11-20T19:27:47,223 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 6e8af0e10da4be5fa330b00646bb6e13/B is initiating minor compaction (all files) 2024-11-20T19:27:47,223 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e8af0e10da4be5fa330b00646bb6e13/B in TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:47,223 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/da531366f7124c6387ed2c666ae17a85, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/a978658b0801416599a2a3c872ae1e98, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/eb57aef32ced4c04b0ad5a8871dcc628] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp, totalSize=35.2 K 2024-11-20T19:27:47,223 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 089425f42ffc466eb41795981ab7ae0c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732130864464 2024-11-20T19:27:47,224 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1fa601a545a349de9442bb1a1a58be6a, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732130864485 2024-11-20T19:27:47,224 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting da531366f7124c6387ed2c666ae17a85, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732130864464 2024-11-20T19:27:47,224 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting a978658b0801416599a2a3c872ae1e98, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732130864485 2024-11-20T19:27:47,224 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d8f921f66a944c7bc5b5056aad92129, keycount=150, 
bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732130865611 2024-11-20T19:27:47,225 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting eb57aef32ced4c04b0ad5a8871dcc628, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732130865611 2024-11-20T19:27:47,227 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:47,228 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T19:27:47,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:47,228 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2837): Flushing 6e8af0e10da4be5fa330b00646bb6e13 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T19:27:47,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=A 2024-11-20T19:27:47,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:47,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=B 2024-11-20T19:27:47,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:47,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=C 2024-11-20T19:27:47,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:47,233 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e8af0e10da4be5fa330b00646bb6e13#B#compaction#540 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:47,233 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/5d2835ef50d44143ada55d2db8d82f8f is 50, key is test_row_0/B:col10/1732130865611/Put/seqid=0 2024-11-20T19:27:47,242 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:47,271 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411201e50bbcf53d941ac970f69a407b7719c_6e8af0e10da4be5fa330b00646bb6e13 store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:47,273 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411201e50bbcf53d941ac970f69a407b7719c_6e8af0e10da4be5fa330b00646bb6e13, store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:47,273 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201e50bbcf53d941ac970f69a407b7719c_6e8af0e10da4be5fa330b00646bb6e13 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:47,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f8c695e342fd4f53a53356a3fb1d6e29_6e8af0e10da4be5fa330b00646bb6e13 is 50, key is test_row_0/A:col10/1732130866272/Put/seqid=0 2024-11-20T19:27:47,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742459_1635 (size=12104) 2024-11-20T19:27:47,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742460_1636 (size=4469) 2024-11-20T19:27:47,314 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/5d2835ef50d44143ada55d2db8d82f8f as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/5d2835ef50d44143ada55d2db8d82f8f 2024-11-20T19:27:47,314 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e8af0e10da4be5fa330b00646bb6e13#A#compaction#541 average throughput is 0.34 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:47,314 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/5d543024ff4c48aba035308a93ee8d41 is 175, key is test_row_0/A:col10/1732130865611/Put/seqid=0 2024-11-20T19:27:47,321 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e8af0e10da4be5fa330b00646bb6e13/B of 6e8af0e10da4be5fa330b00646bb6e13 into 5d2835ef50d44143ada55d2db8d82f8f(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:47,321 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:47,321 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., storeName=6e8af0e10da4be5fa330b00646bb6e13/B, priority=13, startTime=1732130867222; duration=0sec 2024-11-20T19:27:47,321 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:47,321 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e8af0e10da4be5fa330b00646bb6e13:B 2024-11-20T19:27:47,321 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:47,323 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:47,323 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 6e8af0e10da4be5fa330b00646bb6e13/C is initiating minor compaction (all files) 2024-11-20T19:27:47,323 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e8af0e10da4be5fa330b00646bb6e13/C in TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
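The selection messages above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking") are governed by the store's compaction settings: a minor compaction becomes eligible once enough files have accumulated, and writes are throttled once the per-store file count reaches the blocking limit. The sketch below sets those keys to the figures implied by the log, which also happen to match common defaults; it is an illustration, not the configuration this test actually used.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionSettings {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Minimum number of eligible files before a minor compaction is selected;
    // the log shows 3 files being chosen per store (A, B and C).
    conf.setInt("hbase.hstore.compaction.min", 3);

    // Upper bound on the number of files merged in a single minor compaction.
    conf.setInt("hbase.hstore.compaction.max", 10);

    // Once a store holds this many files, further flushes are delayed and
    // writes throttled; this is the "16 blocking" figure in the messages above.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);

    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", 3));
  }
}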
2024-11-20T19:27:47,323 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/d9534d62116c4a08912fc5a13c3975f7, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/f25343e315e242f89c25265b00c01e2b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/924ae7ab8ca044f9ae88f95f134bf66b] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp, totalSize=35.2 K 2024-11-20T19:27:47,323 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting d9534d62116c4a08912fc5a13c3975f7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732130864464 2024-11-20T19:27:47,323 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting f25343e315e242f89c25265b00c01e2b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732130864485 2024-11-20T19:27:47,324 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 924ae7ab8ca044f9ae88f95f134bf66b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732130865611 2024-11-20T19:27:47,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742461_1637 (size=12154) 2024-11-20T19:27:47,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:47,337 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f8c695e342fd4f53a53356a3fb1d6e29_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f8c695e342fd4f53a53356a3fb1d6e29_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:47,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/b2eca50e75ac4f68a07cb18ff2427e9c, store: [table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:47,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/b2eca50e75ac4f68a07cb18ff2427e9c is 175, key is test_row_0/A:col10/1732130866272/Put/seqid=0 2024-11-20T19:27:47,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742462_1638 (size=31058) 2024-11-20T19:27:47,377 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/5d543024ff4c48aba035308a93ee8d41 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/5d543024ff4c48aba035308a93ee8d41 2024-11-20T19:27:47,383 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e8af0e10da4be5fa330b00646bb6e13/A of 6e8af0e10da4be5fa330b00646bb6e13 into 5d543024ff4c48aba035308a93ee8d41(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:47,383 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:47,383 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., storeName=6e8af0e10da4be5fa330b00646bb6e13/A, priority=13, startTime=1732130867222; duration=0sec 2024-11-20T19:27:47,383 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:47,383 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e8af0e10da4be5fa330b00646bb6e13:A 2024-11-20T19:27:47,388 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e8af0e10da4be5fa330b00646bb6e13#C#compaction#543 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:47,389 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/4b8fa00c3203446fb578ac47547f9d78 is 50, key is test_row_0/C:col10/1732130865611/Put/seqid=0 2024-11-20T19:27:47,391 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
as already flushing 2024-11-20T19:27:47,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:47,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742463_1639 (size=30955) 2024-11-20T19:27:47,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46538 deadline: 1732130927410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:47,414 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=77, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/b2eca50e75ac4f68a07cb18ff2427e9c 2024-11-20T19:27:47,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130927411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:47,414 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130927412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:47,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742464_1640 (size=12104) 2024-11-20T19:27:47,444 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/4b8fa00c3203446fb578ac47547f9d78 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/4b8fa00c3203446fb578ac47547f9d78 2024-11-20T19:27:47,449 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e8af0e10da4be5fa330b00646bb6e13/C of 6e8af0e10da4be5fa330b00646bb6e13 into 4b8fa00c3203446fb578ac47547f9d78(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
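The DefaultMobStoreFlusher and DefaultMobStoreCompactor entries, including the "Aborting writer ... because there are no MOB cells" message, suggest family A of this table is MOB-enabled while the small 50-byte test values stay under the MOB threshold and therefore remain in ordinary store files. A hedged sketch of declaring such a family is shown below; the 100 KB threshold is only an assumed default-sized value, not something read from this log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyDescriptor {
  public static void main(String[] args) {
    // Cells larger than the threshold are written to separate MOB files; smaller
    // cells stay in the regular HFiles, which is why the MOB writer above is
    // aborted when a flush or compaction produces no MOB-sized cells.
    ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)
        .setMobThreshold(100 * 1024)   // assumed threshold, larger than the test cells
        .build();

    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(mobFamily)
        .build();

    System.out.println(desc);
  }
}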
2024-11-20T19:27:47,449 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:47,449 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., storeName=6e8af0e10da4be5fa330b00646bb6e13/C, priority=13, startTime=1732130867222; duration=0sec 2024-11-20T19:27:47,450 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:47,450 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e8af0e10da4be5fa330b00646bb6e13:C 2024-11-20T19:27:47,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/06ed4fa72a244e58b800266fc04f42f2 is 50, key is test_row_0/B:col10/1732130866272/Put/seqid=0 2024-11-20T19:27:47,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742465_1641 (size=12001) 2024-11-20T19:27:47,490 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/06ed4fa72a244e58b800266fc04f42f2 2024-11-20T19:27:47,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/e01b70dd4e0b4d8483db9c987b17d92e is 50, key is test_row_0/C:col10/1732130866272/Put/seqid=0 2024-11-20T19:27:47,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130927516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:47,519 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130927516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:47,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46538 deadline: 1732130927516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:47,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742466_1642 (size=12001) 2024-11-20T19:27:47,534 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/e01b70dd4e0b4d8483db9c987b17d92e 2024-11-20T19:27:47,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/b2eca50e75ac4f68a07cb18ff2427e9c as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/b2eca50e75ac4f68a07cb18ff2427e9c 2024-11-20T19:27:47,544 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/b2eca50e75ac4f68a07cb18ff2427e9c, entries=150, sequenceid=77, filesize=30.2 K 2024-11-20T19:27:47,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/06ed4fa72a244e58b800266fc04f42f2 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/06ed4fa72a244e58b800266fc04f42f2 2024-11-20T19:27:47,552 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 
{event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/06ed4fa72a244e58b800266fc04f42f2, entries=150, sequenceid=77, filesize=11.7 K 2024-11-20T19:27:47,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/e01b70dd4e0b4d8483db9c987b17d92e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/e01b70dd4e0b4d8483db9c987b17d92e 2024-11-20T19:27:47,558 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/e01b70dd4e0b4d8483db9c987b17d92e, entries=150, sequenceid=77, filesize=11.7 K 2024-11-20T19:27:47,559 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 6e8af0e10da4be5fa330b00646bb6e13 in 331ms, sequenceid=77, compaction requested=false 2024-11-20T19:27:47,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2538): Flush status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:47,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
2024-11-20T19:27:47,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-11-20T19:27:47,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=156 2024-11-20T19:27:47,562 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-11-20T19:27:47,562 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0830 sec 2024-11-20T19:27:47,564 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees in 3.0910 sec 2024-11-20T19:27:47,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:47,723 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e8af0e10da4be5fa330b00646bb6e13 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T19:27:47,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=A 2024-11-20T19:27:47,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:47,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=B 2024-11-20T19:27:47,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:47,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=C 2024-11-20T19:27:47,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:47,739 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120fb6b914864f841389092aa95a7bd47c0_6e8af0e10da4be5fa330b00646bb6e13 is 50, key is test_row_0/A:col10/1732130867409/Put/seqid=0 2024-11-20T19:27:47,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742467_1643 (size=12154) 2024-11-20T19:27:47,748 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:47,751 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120fb6b914864f841389092aa95a7bd47c0_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fb6b914864f841389092aa95a7bd47c0_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:47,752 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/7c438ec58f1d45db81cd6e0b8ec37ec9, store: [table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:47,752 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/7c438ec58f1d45db81cd6e0b8ec37ec9 is 175, key is test_row_0/A:col10/1732130867409/Put/seqid=0 2024-11-20T19:27:47,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742468_1644 (size=30955) 2024-11-20T19:27:47,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46538 deadline: 1732130927758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:47,762 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=97, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/7c438ec58f1d45db81cd6e0b8ec37ec9 2024-11-20T19:27:47,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130927759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:47,765 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130927761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:47,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/fcf4d62229f44b87b349a33f923f6f2a is 50, key is test_row_0/B:col10/1732130867409/Put/seqid=0 2024-11-20T19:27:47,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742469_1645 (size=12001) 2024-11-20T19:27:47,798 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/fcf4d62229f44b87b349a33f923f6f2a 2024-11-20T19:27:47,807 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/3383f5e125fc47f5ad092f4bc14bbfde is 50, key is test_row_0/C:col10/1732130867409/Put/seqid=0 2024-11-20T19:27:47,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742470_1646 (size=12001) 2024-11-20T19:27:47,834 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/3383f5e125fc47f5ad092f4bc14bbfde 2024-11-20T19:27:47,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/7c438ec58f1d45db81cd6e0b8ec37ec9 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/7c438ec58f1d45db81cd6e0b8ec37ec9 2024-11-20T19:27:47,845 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/7c438ec58f1d45db81cd6e0b8ec37ec9, entries=150, sequenceid=97, filesize=30.2 K 
2024-11-20T19:27:47,846 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/fcf4d62229f44b87b349a33f923f6f2a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/fcf4d62229f44b87b349a33f923f6f2a 2024-11-20T19:27:47,851 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/fcf4d62229f44b87b349a33f923f6f2a, entries=150, sequenceid=97, filesize=11.7 K 2024-11-20T19:27:47,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/3383f5e125fc47f5ad092f4bc14bbfde as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/3383f5e125fc47f5ad092f4bc14bbfde 2024-11-20T19:27:47,863 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46538 deadline: 1732130927863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:47,866 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/3383f5e125fc47f5ad092f4bc14bbfde, entries=150, sequenceid=97, filesize=11.7 K 2024-11-20T19:27:47,867 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 6e8af0e10da4be5fa330b00646bb6e13 in 144ms, sequenceid=97, compaction requested=true 2024-11-20T19:27:47,867 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:47,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e8af0e10da4be5fa330b00646bb6e13:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:47,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:47,867 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:47,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e8af0e10da4be5fa330b00646bb6e13:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:47,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:47,867 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:47,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e8af0e10da4be5fa330b00646bb6e13:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:47,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:47,868 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 92968 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:47,868 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 6e8af0e10da4be5fa330b00646bb6e13/A is initiating minor compaction (all files) 2024-11-20T19:27:47,868 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e8af0e10da4be5fa330b00646bb6e13/A in TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:47,868 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/5d543024ff4c48aba035308a93ee8d41, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/b2eca50e75ac4f68a07cb18ff2427e9c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/7c438ec58f1d45db81cd6e0b8ec37ec9] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp, totalSize=90.8 K 2024-11-20T19:27:47,868 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:47,868 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. files: [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/5d543024ff4c48aba035308a93ee8d41, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/b2eca50e75ac4f68a07cb18ff2427e9c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/7c438ec58f1d45db81cd6e0b8ec37ec9] 2024-11-20T19:27:47,868 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:47,868 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 6e8af0e10da4be5fa330b00646bb6e13/B is initiating minor compaction (all files) 2024-11-20T19:27:47,868 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e8af0e10da4be5fa330b00646bb6e13/B in TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
2024-11-20T19:27:47,869 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/5d2835ef50d44143ada55d2db8d82f8f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/06ed4fa72a244e58b800266fc04f42f2, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/fcf4d62229f44b87b349a33f923f6f2a] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp, totalSize=35.3 K 2024-11-20T19:27:47,869 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d543024ff4c48aba035308a93ee8d41, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732130865611 2024-11-20T19:27:47,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:47,869 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting b2eca50e75ac4f68a07cb18ff2427e9c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732130866268 2024-11-20T19:27:47,869 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e8af0e10da4be5fa330b00646bb6e13 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T19:27:47,869 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d2835ef50d44143ada55d2db8d82f8f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732130865611 2024-11-20T19:27:47,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=A 2024-11-20T19:27:47,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:47,869 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c438ec58f1d45db81cd6e0b8ec37ec9, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732130867403 2024-11-20T19:27:47,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=B 2024-11-20T19:27:47,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:47,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=C 2024-11-20T19:27:47,869 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 06ed4fa72a244e58b800266fc04f42f2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732130866268 2024-11-20T19:27:47,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:47,870 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 
fcf4d62229f44b87b349a33f923f6f2a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732130867403 2024-11-20T19:27:47,902 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e8af0e10da4be5fa330b00646bb6e13#B#compaction#549 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:47,902 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/dd787e852c704a5c8083fa1673ed5067 is 50, key is test_row_0/B:col10/1732130867409/Put/seqid=0 2024-11-20T19:27:47,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130927899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:47,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130927901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:47,915 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:47,917 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411208273c5151cd842bab2979a674c57f1a9_6e8af0e10da4be5fa330b00646bb6e13 is 50, key is test_row_0/A:col10/1732130867868/Put/seqid=0 2024-11-20T19:27:47,931 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411208e078556ee1e4b9382525280059a7a91_6e8af0e10da4be5fa330b00646bb6e13 store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:47,933 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411208e078556ee1e4b9382525280059a7a91_6e8af0e10da4be5fa330b00646bb6e13, store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:47,933 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411208e078556ee1e4b9382525280059a7a91_6e8af0e10da4be5fa330b00646bb6e13 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:47,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742471_1647 (size=12207) 2024-11-20T19:27:47,972 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/dd787e852c704a5c8083fa1673ed5067 as 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/dd787e852c704a5c8083fa1673ed5067 2024-11-20T19:27:47,978 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e8af0e10da4be5fa330b00646bb6e13/B of 6e8af0e10da4be5fa330b00646bb6e13 into dd787e852c704a5c8083fa1673ed5067(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:47,978 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:47,978 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., storeName=6e8af0e10da4be5fa330b00646bb6e13/B, priority=13, startTime=1732130867867; duration=0sec 2024-11-20T19:27:47,978 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:47,978 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e8af0e10da4be5fa330b00646bb6e13:B 2024-11-20T19:27:47,978 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:47,979 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:47,980 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 6e8af0e10da4be5fa330b00646bb6e13/C is initiating minor compaction (all files) 2024-11-20T19:27:47,980 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e8af0e10da4be5fa330b00646bb6e13/C in TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
2024-11-20T19:27:47,980 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/4b8fa00c3203446fb578ac47547f9d78, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/e01b70dd4e0b4d8483db9c987b17d92e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/3383f5e125fc47f5ad092f4bc14bbfde] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp, totalSize=35.3 K 2024-11-20T19:27:47,980 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b8fa00c3203446fb578ac47547f9d78, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732130865611 2024-11-20T19:27:47,980 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting e01b70dd4e0b4d8483db9c987b17d92e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732130866268 2024-11-20T19:27:47,981 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 3383f5e125fc47f5ad092f4bc14bbfde, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732130867403 2024-11-20T19:27:47,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742472_1648 (size=12154) 2024-11-20T19:27:47,988 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:47,992 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411208273c5151cd842bab2979a674c57f1a9_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411208273c5151cd842bab2979a674c57f1a9_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:47,993 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/480464d76a61432c8c83fb3054cfcb28, store: [table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:47,993 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/480464d76a61432c8c83fb3054cfcb28 is 175, key is test_row_0/A:col10/1732130867868/Put/seqid=0 2024-11-20T19:27:48,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742473_1649 (size=4469) 
2024-11-20T19:27:48,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,009 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e8af0e10da4be5fa330b00646bb6e13#A#compaction#550 average throughput is 0.26 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:48,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130928006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:48,010 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,010 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/a762e17f2e5141e4a9f2f3bad0582262 is 175, key is test_row_0/A:col10/1732130867409/Put/seqid=0 2024-11-20T19:27:48,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130928006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:48,017 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e8af0e10da4be5fa330b00646bb6e13#C#compaction#552 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:48,017 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/51a86d1639f44a2ab63afc5f959d4f17 is 50, key is test_row_0/C:col10/1732130867409/Put/seqid=0 2024-11-20T19:27:48,065 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46538 deadline: 1732130928065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:48,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742474_1650 (size=30955) 2024-11-20T19:27:48,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742475_1651 (size=31161) 2024-11-20T19:27:48,090 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/a762e17f2e5141e4a9f2f3bad0582262 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/a762e17f2e5141e4a9f2f3bad0582262 2024-11-20T19:27:48,095 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e8af0e10da4be5fa330b00646bb6e13/A of 6e8af0e10da4be5fa330b00646bb6e13 into a762e17f2e5141e4a9f2f3bad0582262(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:48,095 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:48,095 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., storeName=6e8af0e10da4be5fa330b00646bb6e13/A, priority=13, startTime=1732130867867; duration=0sec 2024-11-20T19:27:48,095 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:48,095 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e8af0e10da4be5fa330b00646bb6e13:A 2024-11-20T19:27:48,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742476_1652 (size=12207) 2024-11-20T19:27:48,100 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/51a86d1639f44a2ab63afc5f959d4f17 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/51a86d1639f44a2ab63afc5f959d4f17 2024-11-20T19:27:48,104 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e8af0e10da4be5fa330b00646bb6e13/C of 6e8af0e10da4be5fa330b00646bb6e13 into 51a86d1639f44a2ab63afc5f959d4f17(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:48,104 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:48,104 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., storeName=6e8af0e10da4be5fa330b00646bb6e13/C, priority=13, startTime=1732130867867; duration=0sec 2024-11-20T19:27:48,105 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:48,105 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e8af0e10da4be5fa330b00646bb6e13:C 2024-11-20T19:27:48,212 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130928211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:48,212 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130928211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:48,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46538 deadline: 1732130928367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:48,508 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=117, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/480464d76a61432c8c83fb3054cfcb28 2024-11-20T19:27:48,515 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/5c8aab5a0fdc485aac66bb758825addc is 50, key is test_row_0/B:col10/1732130867868/Put/seqid=0 2024-11-20T19:27:48,517 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130928514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:48,517 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130928514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:48,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742477_1653 (size=12001) 2024-11-20T19:27:48,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-20T19:27:48,582 INFO [Thread-2737 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 155 completed 2024-11-20T19:27:48,583 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:48,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees 2024-11-20T19:27:48,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T19:27:48,585 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:48,585 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:48,585 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:48,629 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46576 deadline: 1732130928627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:48,629 DEBUG [Thread-2727 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4160 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., hostname=db9c3a6c6492,41229,1732130701496, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:27:48,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46578 deadline: 1732130928638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:48,640 DEBUG [Thread-2735 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4171 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., hostname=db9c3a6c6492,41229,1732130701496, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:27:48,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T19:27:48,737 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:48,738 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T19:27:48,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:48,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:48,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:48,738 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:48,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:48,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:48,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46538 deadline: 1732130928869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:48,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T19:27:48,890 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:48,890 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T19:27:48,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:48,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:48,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:48,891 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:48,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:48,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:48,950 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/5c8aab5a0fdc485aac66bb758825addc 2024-11-20T19:27:48,960 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/ace5f45ea3524023bf6b4700365837ad is 50, key is test_row_0/C:col10/1732130867868/Put/seqid=0 2024-11-20T19:27:48,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742478_1654 (size=12001) 2024-11-20T19:27:48,985 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/ace5f45ea3524023bf6b4700365837ad 2024-11-20T19:27:48,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/480464d76a61432c8c83fb3054cfcb28 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/480464d76a61432c8c83fb3054cfcb28 2024-11-20T19:27:48,996 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/480464d76a61432c8c83fb3054cfcb28, entries=150, sequenceid=117, filesize=30.2 K 2024-11-20T19:27:48,996 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/5c8aab5a0fdc485aac66bb758825addc as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/5c8aab5a0fdc485aac66bb758825addc 2024-11-20T19:27:49,001 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/5c8aab5a0fdc485aac66bb758825addc, entries=150, sequenceid=117, filesize=11.7 K 
2024-11-20T19:27:49,001 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/ace5f45ea3524023bf6b4700365837ad as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/ace5f45ea3524023bf6b4700365837ad 2024-11-20T19:27:49,005 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/ace5f45ea3524023bf6b4700365837ad, entries=150, sequenceid=117, filesize=11.7 K 2024-11-20T19:27:49,007 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 6e8af0e10da4be5fa330b00646bb6e13 in 1138ms, sequenceid=117, compaction requested=false 2024-11-20T19:27:49,007 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:49,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:49,023 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e8af0e10da4be5fa330b00646bb6e13 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-20T19:27:49,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=A 2024-11-20T19:27:49,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:49,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=B 2024-11-20T19:27:49,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:49,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=C 2024-11-20T19:27:49,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:49,043 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:49,043 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T19:27:49,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:49,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
as already flushing 2024-11-20T19:27:49,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:49,043 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:49,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:49,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:49,051 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:49,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130929049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:49,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:49,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130929050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:49,058 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204e1f42986691492895f5bc9962cf5656_6e8af0e10da4be5fa330b00646bb6e13 is 50, key is test_row_0/A:col10/1732130869022/Put/seqid=0 2024-11-20T19:27:49,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742479_1655 (size=14794) 2024-11-20T19:27:49,073 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:49,077 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204e1f42986691492895f5bc9962cf5656_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204e1f42986691492895f5bc9962cf5656_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:49,078 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/a8131681dfc94ab7933305a1f796d6aa, store: [table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:49,079 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/a8131681dfc94ab7933305a1f796d6aa is 175, key is test_row_0/A:col10/1732130869022/Put/seqid=0 2024-11-20T19:27:49,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742480_1656 (size=39749) 2024-11-20T19:27:49,104 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=138, memsize=33.5 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/a8131681dfc94ab7933305a1f796d6aa 2024-11-20T19:27:49,114 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/91f6d4bbb3f74cf5b9893f37efe55066 is 50, key is test_row_0/B:col10/1732130869022/Put/seqid=0 2024-11-20T19:27:49,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742481_1657 (size=12151) 2024-11-20T19:27:49,148 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=138 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/91f6d4bbb3f74cf5b9893f37efe55066 2024-11-20T19:27:49,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:49,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130929153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:49,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:49,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130929154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:49,160 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/a2272a705a4f469bb87e8059dbc2c5cd is 50, key is test_row_0/C:col10/1732130869022/Put/seqid=0 2024-11-20T19:27:49,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T19:27:49,195 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:49,195 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T19:27:49,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:49,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:49,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:49,196 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:49,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:49,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:49,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742482_1658 (size=12151) 2024-11-20T19:27:49,199 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=138 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/a2272a705a4f469bb87e8059dbc2c5cd 2024-11-20T19:27:49,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/a8131681dfc94ab7933305a1f796d6aa as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/a8131681dfc94ab7933305a1f796d6aa 2024-11-20T19:27:49,213 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/a8131681dfc94ab7933305a1f796d6aa, entries=200, sequenceid=138, filesize=38.8 K 2024-11-20T19:27:49,214 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/91f6d4bbb3f74cf5b9893f37efe55066 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/91f6d4bbb3f74cf5b9893f37efe55066 2024-11-20T19:27:49,218 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/91f6d4bbb3f74cf5b9893f37efe55066, entries=150, sequenceid=138, filesize=11.9 K 2024-11-20T19:27:49,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/a2272a705a4f469bb87e8059dbc2c5cd as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/a2272a705a4f469bb87e8059dbc2c5cd 2024-11-20T19:27:49,224 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/a2272a705a4f469bb87e8059dbc2c5cd, entries=150, sequenceid=138, filesize=11.9 K 2024-11-20T19:27:49,224 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 6e8af0e10da4be5fa330b00646bb6e13 in 201ms, sequenceid=138, compaction requested=true 2024-11-20T19:27:49,224 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:49,225 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:49,225 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:49,225 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 6e8af0e10da4be5fa330b00646bb6e13/A is initiating minor compaction (all files) 2024-11-20T19:27:49,226 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e8af0e10da4be5fa330b00646bb6e13/A in TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
2024-11-20T19:27:49,226 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/a762e17f2e5141e4a9f2f3bad0582262, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/480464d76a61432c8c83fb3054cfcb28, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/a8131681dfc94ab7933305a1f796d6aa] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp, totalSize=99.5 K 2024-11-20T19:27:49,226 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:49,226 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. files: [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/a762e17f2e5141e4a9f2f3bad0582262, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/480464d76a61432c8c83fb3054cfcb28, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/a8131681dfc94ab7933305a1f796d6aa] 2024-11-20T19:27:49,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e8af0e10da4be5fa330b00646bb6e13:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:49,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:49,226 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting a762e17f2e5141e4a9f2f3bad0582262, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732130867403 2024-11-20T19:27:49,226 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 480464d76a61432c8c83fb3054cfcb28, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732130867758 2024-11-20T19:27:49,227 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting a8131681dfc94ab7933305a1f796d6aa, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1732130867892 2024-11-20T19:27:49,228 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:49,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add 
compact mark for store 6e8af0e10da4be5fa330b00646bb6e13:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:49,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:49,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e8af0e10da4be5fa330b00646bb6e13:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:49,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:49,229 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:49,229 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 6e8af0e10da4be5fa330b00646bb6e13/B is initiating minor compaction (all files) 2024-11-20T19:27:49,229 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e8af0e10da4be5fa330b00646bb6e13/B in TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:49,229 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/dd787e852c704a5c8083fa1673ed5067, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/5c8aab5a0fdc485aac66bb758825addc, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/91f6d4bbb3f74cf5b9893f37efe55066] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp, totalSize=35.5 K 2024-11-20T19:27:49,230 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting dd787e852c704a5c8083fa1673ed5067, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732130867403 2024-11-20T19:27:49,231 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c8aab5a0fdc485aac66bb758825addc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732130867758 2024-11-20T19:27:49,231 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 91f6d4bbb3f74cf5b9893f37efe55066, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1732130867899 2024-11-20T19:27:49,246 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:49,255 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
6e8af0e10da4be5fa330b00646bb6e13#B#compaction#559 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:49,256 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/42284fc65d4f43db8ec006a3eee60f5a is 50, key is test_row_0/B:col10/1732130869022/Put/seqid=0 2024-11-20T19:27:49,269 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120fa93e545664d4da3a415f8300df394a3_6e8af0e10da4be5fa330b00646bb6e13 store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:49,271 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120fa93e545664d4da3a415f8300df394a3_6e8af0e10da4be5fa330b00646bb6e13, store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:49,271 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120fa93e545664d4da3a415f8300df394a3_6e8af0e10da4be5fa330b00646bb6e13 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:49,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742483_1659 (size=12459) 2024-11-20T19:27:49,297 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/42284fc65d4f43db8ec006a3eee60f5a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/42284fc65d4f43db8ec006a3eee60f5a 2024-11-20T19:27:49,302 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e8af0e10da4be5fa330b00646bb6e13/B of 6e8af0e10da4be5fa330b00646bb6e13 into 42284fc65d4f43db8ec006a3eee60f5a(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:49,303 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:49,303 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., storeName=6e8af0e10da4be5fa330b00646bb6e13/B, priority=13, startTime=1732130869228; duration=0sec 2024-11-20T19:27:49,303 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:49,303 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e8af0e10da4be5fa330b00646bb6e13:B 2024-11-20T19:27:49,303 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:49,304 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:49,304 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 6e8af0e10da4be5fa330b00646bb6e13/C is initiating minor compaction (all files) 2024-11-20T19:27:49,304 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e8af0e10da4be5fa330b00646bb6e13/C in TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:49,305 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/51a86d1639f44a2ab63afc5f959d4f17, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/ace5f45ea3524023bf6b4700365837ad, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/a2272a705a4f469bb87e8059dbc2c5cd] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp, totalSize=35.5 K 2024-11-20T19:27:49,305 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 51a86d1639f44a2ab63afc5f959d4f17, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732130867403 2024-11-20T19:27:49,305 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting ace5f45ea3524023bf6b4700365837ad, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732130867758 2024-11-20T19:27:49,306 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting a2272a705a4f469bb87e8059dbc2c5cd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1732130867899 2024-11-20T19:27:49,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is 
added to blk_1073742484_1660 (size=4469) 2024-11-20T19:27:49,313 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e8af0e10da4be5fa330b00646bb6e13#A#compaction#558 average throughput is 0.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:49,313 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/2d4e41ddb0cd49e48f89e93cb5c323e7 is 175, key is test_row_0/A:col10/1732130869022/Put/seqid=0 2024-11-20T19:27:49,316 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e8af0e10da4be5fa330b00646bb6e13#C#compaction#560 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:49,316 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/2f0f1659cff24b15b964d5a9b85f7f5e is 50, key is test_row_0/C:col10/1732130869022/Put/seqid=0 2024-11-20T19:27:49,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742485_1661 (size=31413) 2024-11-20T19:27:49,325 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/2d4e41ddb0cd49e48f89e93cb5c323e7 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/2d4e41ddb0cd49e48f89e93cb5c323e7 2024-11-20T19:27:49,329 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e8af0e10da4be5fa330b00646bb6e13/A of 6e8af0e10da4be5fa330b00646bb6e13 into 2d4e41ddb0cd49e48f89e93cb5c323e7(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:49,329 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:49,329 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., storeName=6e8af0e10da4be5fa330b00646bb6e13/A, priority=13, startTime=1732130869224; duration=0sec 2024-11-20T19:27:49,329 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:49,329 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e8af0e10da4be5fa330b00646bb6e13:A 2024-11-20T19:27:49,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742486_1662 (size=12459) 2024-11-20T19:27:49,348 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:49,348 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T19:27:49,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:49,348 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2837): Flushing 6e8af0e10da4be5fa330b00646bb6e13 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-20T19:27:49,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=A 2024-11-20T19:27:49,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:49,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=B 2024-11-20T19:27:49,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:49,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=C 2024-11-20T19:27:49,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:49,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112092284b95c29c469799d5750aac58417c_6e8af0e10da4be5fa330b00646bb6e13 is 50, key is test_row_0/A:col10/1732130869049/Put/seqid=0 2024-11-20T19:27:49,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742487_1663 (size=12304) 2024-11-20T19:27:49,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:49,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:49,375 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:49,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130929374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:49,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:49,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130929376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:49,478 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:49,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130929476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:49,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:49,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130929478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:49,681 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:49,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130929680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:49,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:49,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130929680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:49,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T19:27:49,733 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/2f0f1659cff24b15b964d5a9b85f7f5e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/2f0f1659cff24b15b964d5a9b85f7f5e 2024-11-20T19:27:49,736 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e8af0e10da4be5fa330b00646bb6e13/C of 6e8af0e10da4be5fa330b00646bb6e13 into 2f0f1659cff24b15b964d5a9b85f7f5e(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:49,736 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:49,736 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., storeName=6e8af0e10da4be5fa330b00646bb6e13/C, priority=13, startTime=1732130869228; duration=0sec 2024-11-20T19:27:49,736 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:49,736 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e8af0e10da4be5fa330b00646bb6e13:C 2024-11-20T19:27:49,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:49,762 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112092284b95c29c469799d5750aac58417c_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112092284b95c29c469799d5750aac58417c_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:49,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/7a375fd84107458f86ff91288aad9653, store: [table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:49,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/7a375fd84107458f86ff91288aad9653 is 175, key is test_row_0/A:col10/1732130869049/Put/seqid=0 2024-11-20T19:27:49,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742488_1664 (size=31105) 2024-11-20T19:27:49,882 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:49,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46538 deadline: 1732130929881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:49,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:49,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130929982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:49,984 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:49,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130929983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:50,166 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=158, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/7a375fd84107458f86ff91288aad9653 2024-11-20T19:27:50,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/d622df7f3a634641bf5ec26914b0b947 is 50, key is test_row_0/B:col10/1732130869049/Put/seqid=0 2024-11-20T19:27:50,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742489_1665 (size=12151) 2024-11-20T19:27:50,488 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:50,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130930487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:50,489 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:50,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130930489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:50,574 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/d622df7f3a634641bf5ec26914b0b947 2024-11-20T19:27:50,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/14386f78fe2d48dab49a5791a7bcd878 is 50, key is test_row_0/C:col10/1732130869049/Put/seqid=0 2024-11-20T19:27:50,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742490_1666 (size=12151) 2024-11-20T19:27:50,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T19:27:50,982 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/14386f78fe2d48dab49a5791a7bcd878 2024-11-20T19:27:50,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/7a375fd84107458f86ff91288aad9653 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/7a375fd84107458f86ff91288aad9653 2024-11-20T19:27:50,987 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/7a375fd84107458f86ff91288aad9653, entries=150, sequenceid=158, filesize=30.4 K 2024-11-20T19:27:50,988 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/d622df7f3a634641bf5ec26914b0b947 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/d622df7f3a634641bf5ec26914b0b947 2024-11-20T19:27:50,990 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/d622df7f3a634641bf5ec26914b0b947, entries=150, sequenceid=158, filesize=11.9 K 2024-11-20T19:27:50,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/14386f78fe2d48dab49a5791a7bcd878 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/14386f78fe2d48dab49a5791a7bcd878 2024-11-20T19:27:50,992 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/14386f78fe2d48dab49a5791a7bcd878, entries=150, sequenceid=158, filesize=11.9 K 2024-11-20T19:27:50,993 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 6e8af0e10da4be5fa330b00646bb6e13 in 1645ms, sequenceid=158, compaction requested=false 2024-11-20T19:27:50,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2538): Flush status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:50,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
2024-11-20T19:27:50,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=158 2024-11-20T19:27:50,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=158 2024-11-20T19:27:50,995 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=157 2024-11-20T19:27:50,995 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4090 sec 2024-11-20T19:27:50,995 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees in 2.4120 sec 2024-11-20T19:27:51,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:51,495 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e8af0e10da4be5fa330b00646bb6e13 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-20T19:27:51,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=A 2024-11-20T19:27:51,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:51,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=B 2024-11-20T19:27:51,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:51,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=C 2024-11-20T19:27:51,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:51,500 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120da4372a9df884d788a9b62e415150ff4_6e8af0e10da4be5fa330b00646bb6e13 is 50, key is test_row_0/A:col10/1732130869373/Put/seqid=0 2024-11-20T19:27:51,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742491_1667 (size=14794) 2024-11-20T19:27:51,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:51,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130931513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:51,517 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:51,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130931515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:51,618 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:51,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130931616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:51,620 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:51,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130931618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:51,821 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:51,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130931820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:51,824 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:51,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130931822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:51,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:51,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46538 deadline: 1732130931888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:51,890 DEBUG [Thread-2733 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4132 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., hostname=db9c3a6c6492,41229,1732130701496, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:27:51,903 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:51,906 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120da4372a9df884d788a9b62e415150ff4_6e8af0e10da4be5fa330b00646bb6e13 to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120da4372a9df884d788a9b62e415150ff4_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:51,907 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/9a96133d8aee465a9c9ae152dc4bb6ce, store: [table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:51,907 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/9a96133d8aee465a9c9ae152dc4bb6ce is 175, key is test_row_0/A:col10/1732130869373/Put/seqid=0 2024-11-20T19:27:51,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742492_1668 (size=39749) 2024-11-20T19:27:52,126 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:52,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130932124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:52,127 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:52,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130932126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:52,310 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=178, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/9a96133d8aee465a9c9ae152dc4bb6ce 2024-11-20T19:27:52,315 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/43dda5364741472db4e2fcf31f1bde31 is 50, key is test_row_0/B:col10/1732130869373/Put/seqid=0 2024-11-20T19:27:52,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742493_1669 (size=12151) 2024-11-20T19:27:52,632 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:52,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130932629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:52,632 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:52,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130932631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:52,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:52,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46578 deadline: 1732130932664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:52,666 DEBUG [Thread-2735 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8197 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., hostname=db9c3a6c6492,41229,1732130701496, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:27:52,669 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:52,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46576 deadline: 1732130932667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:52,670 DEBUG [Thread-2727 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8201 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., hostname=db9c3a6c6492,41229,1732130701496, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:27:52,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T19:27:52,688 INFO [Thread-2737 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 157 completed 2024-11-20T19:27:52,689 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:52,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees 2024-11-20T19:27:52,690 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:52,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T19:27:52,691 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:52,691 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:52,718 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=178 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/43dda5364741472db4e2fcf31f1bde31 2024-11-20T19:27:52,723 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/378cf7cf73f04edfa29a3b79111c7368 is 50, key is test_row_0/C:col10/1732130869373/Put/seqid=0 2024-11-20T19:27:52,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742494_1670 (size=12151) 2024-11-20T19:27:52,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T19:27:52,843 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:52,844 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-20T19:27:52,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
2024-11-20T19:27:52,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:52,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:52,844 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:52,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:52,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:52,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T19:27:52,996 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:52,996 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-20T19:27:52,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
2024-11-20T19:27:52,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:52,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:52,996 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:52,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:52,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:53,126 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=178 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/378cf7cf73f04edfa29a3b79111c7368 2024-11-20T19:27:53,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/9a96133d8aee465a9c9ae152dc4bb6ce as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/9a96133d8aee465a9c9ae152dc4bb6ce 2024-11-20T19:27:53,130 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/9a96133d8aee465a9c9ae152dc4bb6ce, entries=200, sequenceid=178, filesize=38.8 K 2024-11-20T19:27:53,131 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/43dda5364741472db4e2fcf31f1bde31 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/43dda5364741472db4e2fcf31f1bde31 2024-11-20T19:27:53,133 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/43dda5364741472db4e2fcf31f1bde31, entries=150, sequenceid=178, filesize=11.9 K 2024-11-20T19:27:53,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/378cf7cf73f04edfa29a3b79111c7368 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/378cf7cf73f04edfa29a3b79111c7368 2024-11-20T19:27:53,136 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/378cf7cf73f04edfa29a3b79111c7368, entries=150, sequenceid=178, filesize=11.9 K 2024-11-20T19:27:53,137 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 6e8af0e10da4be5fa330b00646bb6e13 in 1641ms, sequenceid=178, compaction requested=true 2024-11-20T19:27:53,137 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:53,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e8af0e10da4be5fa330b00646bb6e13:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:53,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:53,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e8af0e10da4be5fa330b00646bb6e13:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:53,137 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:53,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:53,137 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:53,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e8af0e10da4be5fa330b00646bb6e13:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:53,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:53,138 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102267 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:53,138 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:53,138 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 6e8af0e10da4be5fa330b00646bb6e13/A is initiating minor compaction (all files) 2024-11-20T19:27:53,138 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 6e8af0e10da4be5fa330b00646bb6e13/B is initiating minor compaction (all files) 2024-11-20T19:27:53,138 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e8af0e10da4be5fa330b00646bb6e13/A in TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:53,138 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e8af0e10da4be5fa330b00646bb6e13/B in TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
2024-11-20T19:27:53,138 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/2d4e41ddb0cd49e48f89e93cb5c323e7, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/7a375fd84107458f86ff91288aad9653, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/9a96133d8aee465a9c9ae152dc4bb6ce] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp, totalSize=99.9 K 2024-11-20T19:27:53,138 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/42284fc65d4f43db8ec006a3eee60f5a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/d622df7f3a634641bf5ec26914b0b947, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/43dda5364741472db4e2fcf31f1bde31] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp, totalSize=35.9 K 2024-11-20T19:27:53,138 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:53,138 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
files: [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/2d4e41ddb0cd49e48f89e93cb5c323e7, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/7a375fd84107458f86ff91288aad9653, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/9a96133d8aee465a9c9ae152dc4bb6ce] 2024-11-20T19:27:53,139 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 42284fc65d4f43db8ec006a3eee60f5a, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1732130867899 2024-11-20T19:27:53,139 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d4e41ddb0cd49e48f89e93cb5c323e7, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1732130867899 2024-11-20T19:27:53,139 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting d622df7f3a634641bf5ec26914b0b947, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732130869045 2024-11-20T19:27:53,139 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7a375fd84107458f86ff91288aad9653, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732130869045 2024-11-20T19:27:53,139 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 43dda5364741472db4e2fcf31f1bde31, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1732130869365 2024-11-20T19:27:53,139 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9a96133d8aee465a9c9ae152dc4bb6ce, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1732130869365 2024-11-20T19:27:53,143 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:53,144 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120274ecb2496d84e4785ff3e0ff8013c73_6e8af0e10da4be5fa330b00646bb6e13 store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:53,145 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e8af0e10da4be5fa330b00646bb6e13#B#compaction#568 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:53,145 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/f98017f55b1b4293b08b39a21e331170 is 50, key is test_row_0/B:col10/1732130869373/Put/seqid=0 2024-11-20T19:27:53,145 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120274ecb2496d84e4785ff3e0ff8013c73_6e8af0e10da4be5fa330b00646bb6e13, store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:53,145 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120274ecb2496d84e4785ff3e0ff8013c73_6e8af0e10da4be5fa330b00646bb6e13 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:53,148 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:53,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742495_1671 (size=12561) 2024-11-20T19:27:53,148 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-20T19:27:53,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
2024-11-20T19:27:53,148 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2837): Flushing 6e8af0e10da4be5fa330b00646bb6e13 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-20T19:27:53,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=A 2024-11-20T19:27:53,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:53,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=B 2024-11-20T19:27:53,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:53,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=C 2024-11-20T19:27:53,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:53,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742496_1672 (size=4469) 2024-11-20T19:27:53,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112003d9a919582c4834ba95309a5d83a55a_6e8af0e10da4be5fa330b00646bb6e13 is 50, key is test_row_0/A:col10/1732130871512/Put/seqid=0 2024-11-20T19:27:53,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742497_1673 (size=12304) 2024-11-20T19:27:53,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T19:27:53,552 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/f98017f55b1b4293b08b39a21e331170 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/f98017f55b1b4293b08b39a21e331170 2024-11-20T19:27:53,553 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e8af0e10da4be5fa330b00646bb6e13#A#compaction#567 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:53,553 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/8c669a1925094fa894feada7107d529a is 175, key is test_row_0/A:col10/1732130869373/Put/seqid=0 2024-11-20T19:27:53,556 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e8af0e10da4be5fa330b00646bb6e13/B of 6e8af0e10da4be5fa330b00646bb6e13 into f98017f55b1b4293b08b39a21e331170(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:53,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742498_1674 (size=31515) 2024-11-20T19:27:53,556 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:53,556 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., storeName=6e8af0e10da4be5fa330b00646bb6e13/B, priority=13, startTime=1732130873137; duration=0sec 2024-11-20T19:27:53,556 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:53,556 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e8af0e10da4be5fa330b00646bb6e13:B 2024-11-20T19:27:53,556 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:53,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:53,558 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:53,558 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 6e8af0e10da4be5fa330b00646bb6e13/C is initiating minor compaction (all files) 2024-11-20T19:27:53,558 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e8af0e10da4be5fa330b00646bb6e13/C in TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
2024-11-20T19:27:53,558 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/2f0f1659cff24b15b964d5a9b85f7f5e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/14386f78fe2d48dab49a5791a7bcd878, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/378cf7cf73f04edfa29a3b79111c7368] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp, totalSize=35.9 K 2024-11-20T19:27:53,558 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f0f1659cff24b15b964d5a9b85f7f5e, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1732130867899 2024-11-20T19:27:53,559 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 14386f78fe2d48dab49a5791a7bcd878, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732130869045 2024-11-20T19:27:53,559 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 378cf7cf73f04edfa29a3b79111c7368, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1732130869365 2024-11-20T19:27:53,561 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112003d9a919582c4834ba95309a5d83a55a_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112003d9a919582c4834ba95309a5d83a55a_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:53,561 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/8c669a1925094fa894feada7107d529a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/8c669a1925094fa894feada7107d529a 2024-11-20T19:27:53,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/86c830189fcc4405a72241aa5502e709, store: [table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:53,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/86c830189fcc4405a72241aa5502e709 is 175, key is test_row_0/A:col10/1732130871512/Put/seqid=0 2024-11-20T19:27:53,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742499_1675 (size=31105) 2024-11-20T19:27:53,565 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e8af0e10da4be5fa330b00646bb6e13#C#compaction#570 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:53,565 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/d0dc383a7d634bdbb0222076b0ba3667 is 50, key is test_row_0/C:col10/1732130869373/Put/seqid=0 2024-11-20T19:27:53,565 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=195, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/86c830189fcc4405a72241aa5502e709 2024-11-20T19:27:53,567 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e8af0e10da4be5fa330b00646bb6e13/A of 6e8af0e10da4be5fa330b00646bb6e13 into 8c669a1925094fa894feada7107d529a(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:53,567 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:53,567 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., storeName=6e8af0e10da4be5fa330b00646bb6e13/A, priority=13, startTime=1732130873137; duration=0sec 2024-11-20T19:27:53,567 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:53,567 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e8af0e10da4be5fa330b00646bb6e13:A 2024-11-20T19:27:53,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742500_1676 (size=12561) 2024-11-20T19:27:53,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/c51d9fdd1f924c74a5beeeabc0acd493 is 50, key is test_row_0/B:col10/1732130871512/Put/seqid=0 2024-11-20T19:27:53,575 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/d0dc383a7d634bdbb0222076b0ba3667 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/d0dc383a7d634bdbb0222076b0ba3667 2024-11-20T19:27:53,578 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e8af0e10da4be5fa330b00646bb6e13/C of 6e8af0e10da4be5fa330b00646bb6e13 into d0dc383a7d634bdbb0222076b0ba3667(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:53,578 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:53,578 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., storeName=6e8af0e10da4be5fa330b00646bb6e13/C, priority=13, startTime=1732130873137; duration=0sec 2024-11-20T19:27:53,578 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:53,578 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e8af0e10da4be5fa330b00646bb6e13:C 2024-11-20T19:27:53,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742501_1677 (size=12151) 2024-11-20T19:27:53,585 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/c51d9fdd1f924c74a5beeeabc0acd493 2024-11-20T19:27:53,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/90ba572a77ac4ee7a689e5197a1857e1 is 50, key is test_row_0/C:col10/1732130871512/Put/seqid=0 2024-11-20T19:27:53,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742502_1678 (size=12151) 2024-11-20T19:27:53,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:53,638 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:53,664 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:53,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130933662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:53,664 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:53,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130933662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:53,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:53,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130933765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:53,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:53,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130933765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:53,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T19:27:53,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:53,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130933966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:53,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:53,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130933967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:54,013 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/90ba572a77ac4ee7a689e5197a1857e1 2024-11-20T19:27:54,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/86c830189fcc4405a72241aa5502e709 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/86c830189fcc4405a72241aa5502e709 2024-11-20T19:27:54,019 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/86c830189fcc4405a72241aa5502e709, entries=150, sequenceid=195, filesize=30.4 K 2024-11-20T19:27:54,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/c51d9fdd1f924c74a5beeeabc0acd493 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/c51d9fdd1f924c74a5beeeabc0acd493 2024-11-20T19:27:54,023 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/c51d9fdd1f924c74a5beeeabc0acd493, entries=150, sequenceid=195, filesize=11.9 K 2024-11-20T19:27:54,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/90ba572a77ac4ee7a689e5197a1857e1 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/90ba572a77ac4ee7a689e5197a1857e1 2024-11-20T19:27:54,026 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/90ba572a77ac4ee7a689e5197a1857e1, entries=150, sequenceid=195, filesize=11.9 K 2024-11-20T19:27:54,026 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 6e8af0e10da4be5fa330b00646bb6e13 in 878ms, sequenceid=195, compaction requested=false 2024-11-20T19:27:54,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2538): Flush status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:54,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
2024-11-20T19:27:54,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=160 2024-11-20T19:27:54,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=160 2024-11-20T19:27:54,028 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-11-20T19:27:54,028 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3360 sec 2024-11-20T19:27:54,028 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees in 1.3390 sec 2024-11-20T19:27:54,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:54,271 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e8af0e10da4be5fa330b00646bb6e13 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T19:27:54,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=A 2024-11-20T19:27:54,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:54,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=B 2024-11-20T19:27:54,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:54,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=C 2024-11-20T19:27:54,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:54,278 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b910d53b227349f78949dc8255e6f18f_6e8af0e10da4be5fa330b00646bb6e13 is 50, key is test_row_0/A:col10/1732130873661/Put/seqid=0 2024-11-20T19:27:54,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742503_1679 (size=14794) 2024-11-20T19:27:54,286 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:54,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130934284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:54,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:54,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130934285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:54,388 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:54,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130934387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:54,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:54,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130934388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:54,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:54,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130934589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:54,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:54,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130934590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:54,681 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:54,683 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b910d53b227349f78949dc8255e6f18f_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b910d53b227349f78949dc8255e6f18f_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:54,684 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/1f9606fa65b54aa39b0f0cd17b8531dd, store: [table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:54,684 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/1f9606fa65b54aa39b0f0cd17b8531dd is 175, key is test_row_0/A:col10/1732130873661/Put/seqid=0 2024-11-20T19:27:54,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742504_1680 (size=39749) 2024-11-20T19:27:54,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T19:27:54,794 INFO [Thread-2737 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-11-20T19:27:54,795 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:54,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-11-20T19:27:54,795 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=161, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:54,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T19:27:54,796 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:54,796 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:54,892 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:54,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130934891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:54,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:54,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130934892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:54,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T19:27:54,947 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:54,947 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T19:27:54,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:54,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:54,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:54,947 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:54,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:54,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:55,087 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=220, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/1f9606fa65b54aa39b0f0cd17b8531dd 2024-11-20T19:27:55,093 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/9c84778d60eb491183d3fea262742723 is 50, key is test_row_0/B:col10/1732130873661/Put/seqid=0 2024-11-20T19:27:55,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742505_1681 (size=12151) 2024-11-20T19:27:55,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T19:27:55,099 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:55,099 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T19:27:55,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:55,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:55,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:55,099 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:55,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:55,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:55,251 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:55,251 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T19:27:55,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:55,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:55,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:55,251 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:55,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:55,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:55,395 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:55,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130935394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:55,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T19:27:55,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:55,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130935396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:55,403 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:55,403 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T19:27:55,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:55,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:55,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:55,403 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:55,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:55,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:55,496 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=220 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/9c84778d60eb491183d3fea262742723 2024-11-20T19:27:55,501 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/1042e92fc7d743e0998d8d9b0f2fbadf is 50, key is test_row_0/C:col10/1732130873661/Put/seqid=0 2024-11-20T19:27:55,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742506_1682 (size=12151) 2024-11-20T19:27:55,554 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:55,554 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T19:27:55,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:55,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:55,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:55,554 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:55,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:55,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:55,706 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:55,706 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T19:27:55,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:55,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:55,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:55,706 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:55,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:55,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:55,858 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:55,858 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T19:27:55,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
2024-11-20T19:27:55,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:55,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:55,859 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:55,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:55,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:55,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T19:27:55,904 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=220 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/1042e92fc7d743e0998d8d9b0f2fbadf 2024-11-20T19:27:55,907 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/1f9606fa65b54aa39b0f0cd17b8531dd as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/1f9606fa65b54aa39b0f0cd17b8531dd 2024-11-20T19:27:55,909 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/1f9606fa65b54aa39b0f0cd17b8531dd, entries=200, sequenceid=220, filesize=38.8 K 2024-11-20T19:27:55,910 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/9c84778d60eb491183d3fea262742723 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/9c84778d60eb491183d3fea262742723 2024-11-20T19:27:55,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:55,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46538 deadline: 1732130935908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:55,910 DEBUG [Thread-2733 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8152 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., hostname=db9c3a6c6492,41229,1732130701496, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:27:55,912 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/9c84778d60eb491183d3fea262742723, entries=150, sequenceid=220, filesize=11.9 K 2024-11-20T19:27:55,913 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/1042e92fc7d743e0998d8d9b0f2fbadf as 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/1042e92fc7d743e0998d8d9b0f2fbadf 2024-11-20T19:27:55,915 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/1042e92fc7d743e0998d8d9b0f2fbadf, entries=150, sequenceid=220, filesize=11.9 K 2024-11-20T19:27:55,916 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 6e8af0e10da4be5fa330b00646bb6e13 in 1645ms, sequenceid=220, compaction requested=true 2024-11-20T19:27:55,916 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:55,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e8af0e10da4be5fa330b00646bb6e13:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:55,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:55,916 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:55,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e8af0e10da4be5fa330b00646bb6e13:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:55,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:55,916 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:55,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e8af0e10da4be5fa330b00646bb6e13:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:55,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:55,917 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102369 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:55,917 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:55,917 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 6e8af0e10da4be5fa330b00646bb6e13/A is initiating minor compaction (all files) 2024-11-20T19:27:55,917 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 6e8af0e10da4be5fa330b00646bb6e13/B is initiating minor compaction (all files) 2024-11-20T19:27:55,917 INFO 
[RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e8af0e10da4be5fa330b00646bb6e13/A in TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:55,917 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e8af0e10da4be5fa330b00646bb6e13/B in TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:55,917 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/8c669a1925094fa894feada7107d529a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/86c830189fcc4405a72241aa5502e709, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/1f9606fa65b54aa39b0f0cd17b8531dd] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp, totalSize=100.0 K 2024-11-20T19:27:55,917 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/f98017f55b1b4293b08b39a21e331170, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/c51d9fdd1f924c74a5beeeabc0acd493, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/9c84778d60eb491183d3fea262742723] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp, totalSize=36.0 K 2024-11-20T19:27:55,917 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:55,917 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
files: [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/8c669a1925094fa894feada7107d529a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/86c830189fcc4405a72241aa5502e709, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/1f9606fa65b54aa39b0f0cd17b8531dd] 2024-11-20T19:27:55,918 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting f98017f55b1b4293b08b39a21e331170, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1732130869365 2024-11-20T19:27:55,918 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c669a1925094fa894feada7107d529a, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1732130869365 2024-11-20T19:27:55,918 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting c51d9fdd1f924c74a5beeeabc0acd493, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732130871508 2024-11-20T19:27:55,918 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 86c830189fcc4405a72241aa5502e709, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732130871508 2024-11-20T19:27:55,918 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c84778d60eb491183d3fea262742723, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732130873660 2024-11-20T19:27:55,918 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f9606fa65b54aa39b0f0cd17b8531dd, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732130873660 2024-11-20T19:27:55,922 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:55,923 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e8af0e10da4be5fa330b00646bb6e13#B#compaction#576 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:55,924 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/ecc4d7decd2a413fbfa796c6706fd229 is 50, key is test_row_0/B:col10/1732130873661/Put/seqid=0 2024-11-20T19:27:55,925 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120c3a76653aba24420b32a553a54a961b6_6e8af0e10da4be5fa330b00646bb6e13 store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:55,927 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120c3a76653aba24420b32a553a54a961b6_6e8af0e10da4be5fa330b00646bb6e13, store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:55,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742507_1683 (size=12663) 2024-11-20T19:27:55,927 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c3a76653aba24420b32a553a54a961b6_6e8af0e10da4be5fa330b00646bb6e13 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:55,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742508_1684 (size=4469) 2024-11-20T19:27:56,010 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:56,010 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T19:27:56,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
2024-11-20T19:27:56,011 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing 6e8af0e10da4be5fa330b00646bb6e13 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T19:27:56,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=A 2024-11-20T19:27:56,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:56,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=B 2024-11-20T19:27:56,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:56,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=C 2024-11-20T19:27:56,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:56,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112096a7a79c44374cb6b2411c3dfeb60cd3_6e8af0e10da4be5fa330b00646bb6e13 is 50, key is test_row_0/A:col10/1732130874282/Put/seqid=0 2024-11-20T19:27:56,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742509_1685 (size=12304) 2024-11-20T19:27:56,331 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/ecc4d7decd2a413fbfa796c6706fd229 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/ecc4d7decd2a413fbfa796c6706fd229 2024-11-20T19:27:56,332 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e8af0e10da4be5fa330b00646bb6e13#A#compaction#577 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:56,332 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/d0f333c99ef04db38bab15ba5cb94ac5 is 175, key is test_row_0/A:col10/1732130873661/Put/seqid=0 2024-11-20T19:27:56,335 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e8af0e10da4be5fa330b00646bb6e13/B of 6e8af0e10da4be5fa330b00646bb6e13 into ecc4d7decd2a413fbfa796c6706fd229(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:56,335 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:56,335 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., storeName=6e8af0e10da4be5fa330b00646bb6e13/B, priority=13, startTime=1732130875916; duration=0sec 2024-11-20T19:27:56,335 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:56,335 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e8af0e10da4be5fa330b00646bb6e13:B 2024-11-20T19:27:56,335 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:56,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742510_1686 (size=31617) 2024-11-20T19:27:56,336 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:56,336 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 6e8af0e10da4be5fa330b00646bb6e13/C is initiating minor compaction (all files) 2024-11-20T19:27:56,336 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e8af0e10da4be5fa330b00646bb6e13/C in TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
2024-11-20T19:27:56,336 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/d0dc383a7d634bdbb0222076b0ba3667, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/90ba572a77ac4ee7a689e5197a1857e1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/1042e92fc7d743e0998d8d9b0f2fbadf] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp, totalSize=36.0 K 2024-11-20T19:27:56,336 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting d0dc383a7d634bdbb0222076b0ba3667, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1732130869365 2024-11-20T19:27:56,337 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 90ba572a77ac4ee7a689e5197a1857e1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732130871508 2024-11-20T19:27:56,337 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 1042e92fc7d743e0998d8d9b0f2fbadf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732130873660 2024-11-20T19:27:56,341 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e8af0e10da4be5fa330b00646bb6e13#C#compaction#579 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:56,341 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/fc364298852346ef9ea16dfb8c9c96ae is 50, key is test_row_0/C:col10/1732130873661/Put/seqid=0 2024-11-20T19:27:56,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742511_1687 (size=12663) 2024-11-20T19:27:56,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:56,398 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
as already flushing 2024-11-20T19:27:56,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:56,420 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112096a7a79c44374cb6b2411c3dfeb60cd3_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112096a7a79c44374cb6b2411c3dfeb60cd3_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:56,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/16abe1552461490eae63a90b460ffc89, store: [table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:56,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/16abe1552461490eae63a90b460ffc89 is 175, key is test_row_0/A:col10/1732130874282/Put/seqid=0 2024-11-20T19:27:56,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742512_1688 (size=31105) 2024-11-20T19:27:56,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:56,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130936433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:56,436 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:56,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130936433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:56,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:56,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:56,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130936536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:56,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130936536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:56,740 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/d0f333c99ef04db38bab15ba5cb94ac5 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/d0f333c99ef04db38bab15ba5cb94ac5 2024-11-20T19:27:56,740 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:56,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130936739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:56,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:56,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130936739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:56,743 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e8af0e10da4be5fa330b00646bb6e13/A of 6e8af0e10da4be5fa330b00646bb6e13 into d0f333c99ef04db38bab15ba5cb94ac5(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:56,744 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:56,744 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., storeName=6e8af0e10da4be5fa330b00646bb6e13/A, priority=13, startTime=1732130875916; duration=0sec 2024-11-20T19:27:56,744 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:56,744 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e8af0e10da4be5fa330b00646bb6e13:A 2024-11-20T19:27:56,747 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/fc364298852346ef9ea16dfb8c9c96ae as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/fc364298852346ef9ea16dfb8c9c96ae 2024-11-20T19:27:56,750 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e8af0e10da4be5fa330b00646bb6e13/C of 6e8af0e10da4be5fa330b00646bb6e13 into fc364298852346ef9ea16dfb8c9c96ae(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:56,750 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:56,750 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., storeName=6e8af0e10da4be5fa330b00646bb6e13/C, priority=13, startTime=1732130875916; duration=0sec 2024-11-20T19:27:56,750 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:56,750 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e8af0e10da4be5fa330b00646bb6e13:C 2024-11-20T19:27:56,824 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=234, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/16abe1552461490eae63a90b460ffc89 2024-11-20T19:27:56,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/cfd7349fd1af4b43b8a1e9000464af7e is 50, key is test_row_0/B:col10/1732130874282/Put/seqid=0 2024-11-20T19:27:56,832 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742513_1689 (size=12151) 2024-11-20T19:27:56,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T19:27:57,044 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:57,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130937042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:57,045 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:57,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130937043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:57,232 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/cfd7349fd1af4b43b8a1e9000464af7e 2024-11-20T19:27:57,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/5a133eb583f547d488f8dae44875c372 is 50, key is test_row_0/C:col10/1732130874282/Put/seqid=0 2024-11-20T19:27:57,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742514_1690 (size=12151) 2024-11-20T19:27:57,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:57,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130937545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:57,549 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:57,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130937547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:57,640 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/5a133eb583f547d488f8dae44875c372 2024-11-20T19:27:57,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/16abe1552461490eae63a90b460ffc89 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/16abe1552461490eae63a90b460ffc89 2024-11-20T19:27:57,645 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/16abe1552461490eae63a90b460ffc89, entries=150, sequenceid=234, filesize=30.4 K 2024-11-20T19:27:57,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/cfd7349fd1af4b43b8a1e9000464af7e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/cfd7349fd1af4b43b8a1e9000464af7e 2024-11-20T19:27:57,648 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/cfd7349fd1af4b43b8a1e9000464af7e, entries=150, sequenceid=234, filesize=11.9 K 2024-11-20T19:27:57,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/5a133eb583f547d488f8dae44875c372 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/5a133eb583f547d488f8dae44875c372 2024-11-20T19:27:57,651 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/5a133eb583f547d488f8dae44875c372, entries=150, sequenceid=234, filesize=11.9 K 2024-11-20T19:27:57,652 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 6e8af0e10da4be5fa330b00646bb6e13 in 1642ms, sequenceid=234, compaction requested=false 2024-11-20T19:27:57,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:57,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:57,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162 2024-11-20T19:27:57,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=162 2024-11-20T19:27:57,653 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-11-20T19:27:57,653 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8570 sec 2024-11-20T19:27:57,654 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 2.8580 sec 2024-11-20T19:27:58,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:58,549 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e8af0e10da4be5fa330b00646bb6e13 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T19:27:58,550 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=A 2024-11-20T19:27:58,550 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:58,550 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=B 2024-11-20T19:27:58,550 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:58,550 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
6e8af0e10da4be5fa330b00646bb6e13, store=C 2024-11-20T19:27:58,550 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:58,557 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207e3dacfb832742aca1d3b87cbaf67f02_6e8af0e10da4be5fa330b00646bb6e13 is 50, key is test_row_0/A:col10/1732130878548/Put/seqid=0 2024-11-20T19:27:58,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742515_1691 (size=14844) 2024-11-20T19:27:58,571 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:58,572 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:58,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130938569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:58,574 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207e3dacfb832742aca1d3b87cbaf67f02_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207e3dacfb832742aca1d3b87cbaf67f02_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:58,575 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/7ef1f2ab67f84ebc95df26cbb9631625, store: [table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:58,576 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:58,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130938572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:58,576 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/7ef1f2ab67f84ebc95df26cbb9631625 is 175, key is test_row_0/A:col10/1732130878548/Put/seqid=0 2024-11-20T19:27:58,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742516_1692 (size=39799) 2024-11-20T19:27:58,581 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=260, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/7ef1f2ab67f84ebc95df26cbb9631625 2024-11-20T19:27:58,587 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/d3b8fd936da44060945d8b457785b64b is 50, key is test_row_0/B:col10/1732130878548/Put/seqid=0 2024-11-20T19:27:58,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742517_1693 (size=12201) 2024-11-20T19:27:58,610 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=260 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/d3b8fd936da44060945d8b457785b64b 2024-11-20T19:27:58,620 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/a189aa6b8af04dd9a69fb3c859485705 is 50, key is test_row_0/C:col10/1732130878548/Put/seqid=0 2024-11-20T19:27:58,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742518_1694 (size=12201) 2024-11-20T19:27:58,662 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=260 (bloomFilter=true), 
to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/a189aa6b8af04dd9a69fb3c859485705 2024-11-20T19:27:58,667 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/7ef1f2ab67f84ebc95df26cbb9631625 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/7ef1f2ab67f84ebc95df26cbb9631625 2024-11-20T19:27:58,671 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/7ef1f2ab67f84ebc95df26cbb9631625, entries=200, sequenceid=260, filesize=38.9 K 2024-11-20T19:27:58,672 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/d3b8fd936da44060945d8b457785b64b as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/d3b8fd936da44060945d8b457785b64b 2024-11-20T19:27:58,677 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/d3b8fd936da44060945d8b457785b64b, entries=150, sequenceid=260, filesize=11.9 K 2024-11-20T19:27:58,678 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/a189aa6b8af04dd9a69fb3c859485705 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/a189aa6b8af04dd9a69fb3c859485705 2024-11-20T19:27:58,679 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:58,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130938674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:58,682 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/a189aa6b8af04dd9a69fb3c859485705, entries=150, sequenceid=260, filesize=11.9 K 2024-11-20T19:27:58,682 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:58,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130938678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:58,683 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 6e8af0e10da4be5fa330b00646bb6e13 in 134ms, sequenceid=260, compaction requested=true 2024-11-20T19:27:58,683 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:58,683 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e8af0e10da4be5fa330b00646bb6e13:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:58,683 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:58,683 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:58,683 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e8af0e10da4be5fa330b00646bb6e13:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:58,683 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:58,683 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:58,683 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e8af0e10da4be5fa330b00646bb6e13:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:58,683 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:58,684 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37015 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:58,684 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 6e8af0e10da4be5fa330b00646bb6e13/B is initiating minor compaction (all files) 2024-11-20T19:27:58,684 INFO 
[RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e8af0e10da4be5fa330b00646bb6e13/B in TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:58,684 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/ecc4d7decd2a413fbfa796c6706fd229, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/cfd7349fd1af4b43b8a1e9000464af7e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/d3b8fd936da44060945d8b457785b64b] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp, totalSize=36.1 K 2024-11-20T19:27:58,684 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102521 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:58,684 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 6e8af0e10da4be5fa330b00646bb6e13/A is initiating minor compaction (all files) 2024-11-20T19:27:58,685 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e8af0e10da4be5fa330b00646bb6e13/A in TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:58,685 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/d0f333c99ef04db38bab15ba5cb94ac5, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/16abe1552461490eae63a90b460ffc89, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/7ef1f2ab67f84ebc95df26cbb9631625] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp, totalSize=100.1 K 2024-11-20T19:27:58,685 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:58,685 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
files: [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/d0f333c99ef04db38bab15ba5cb94ac5, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/16abe1552461490eae63a90b460ffc89, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/7ef1f2ab67f84ebc95df26cbb9631625] 2024-11-20T19:27:58,686 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting ecc4d7decd2a413fbfa796c6706fd229, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732130873660 2024-11-20T19:27:58,686 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting d0f333c99ef04db38bab15ba5cb94ac5, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732130873660 2024-11-20T19:27:58,686 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting cfd7349fd1af4b43b8a1e9000464af7e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732130874278 2024-11-20T19:27:58,686 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 16abe1552461490eae63a90b460ffc89, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732130874278 2024-11-20T19:27:58,686 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting d3b8fd936da44060945d8b457785b64b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1732130876429 2024-11-20T19:27:58,687 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7ef1f2ab67f84ebc95df26cbb9631625, keycount=200, bloomtype=ROW, size=38.9 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1732130876429 2024-11-20T19:27:58,703 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:58,712 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e8af0e10da4be5fa330b00646bb6e13#B#compaction#586 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:58,712 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/b9ba72fb814e47eeb9c047403e37397b is 50, key is test_row_0/B:col10/1732130878548/Put/seqid=0 2024-11-20T19:27:58,714 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120282644e81f3a40cda8a283c11e8f6923_6e8af0e10da4be5fa330b00646bb6e13 store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:58,716 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120282644e81f3a40cda8a283c11e8f6923_6e8af0e10da4be5fa330b00646bb6e13, store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:58,716 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120282644e81f3a40cda8a283c11e8f6923_6e8af0e10da4be5fa330b00646bb6e13 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:58,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742520_1696 (size=4469) 2024-11-20T19:27:58,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742519_1695 (size=12815) 2024-11-20T19:27:58,740 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/b9ba72fb814e47eeb9c047403e37397b as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/b9ba72fb814e47eeb9c047403e37397b 2024-11-20T19:27:58,744 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e8af0e10da4be5fa330b00646bb6e13/B of 6e8af0e10da4be5fa330b00646bb6e13 into b9ba72fb814e47eeb9c047403e37397b(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:58,744 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:58,744 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., storeName=6e8af0e10da4be5fa330b00646bb6e13/B, priority=13, startTime=1732130878683; duration=0sec 2024-11-20T19:27:58,744 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:58,744 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e8af0e10da4be5fa330b00646bb6e13:B 2024-11-20T19:27:58,745 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:58,745 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37015 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:58,745 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 6e8af0e10da4be5fa330b00646bb6e13/C is initiating minor compaction (all files) 2024-11-20T19:27:58,746 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e8af0e10da4be5fa330b00646bb6e13/C in TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:58,746 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/fc364298852346ef9ea16dfb8c9c96ae, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/5a133eb583f547d488f8dae44875c372, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/a189aa6b8af04dd9a69fb3c859485705] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp, totalSize=36.1 K 2024-11-20T19:27:58,746 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting fc364298852346ef9ea16dfb8c9c96ae, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732130873660 2024-11-20T19:27:58,746 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a133eb583f547d488f8dae44875c372, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732130874278 2024-11-20T19:27:58,746 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting a189aa6b8af04dd9a69fb3c859485705, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1732130876429 2024-11-20T19:27:58,757 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
6e8af0e10da4be5fa330b00646bb6e13#C#compaction#587 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:58,757 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/d9701cbc30ec48eaa5327c5e43b8caa1 is 50, key is test_row_0/C:col10/1732130878548/Put/seqid=0 2024-11-20T19:27:58,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742521_1697 (size=12815) 2024-11-20T19:27:58,766 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/d9701cbc30ec48eaa5327c5e43b8caa1 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/d9701cbc30ec48eaa5327c5e43b8caa1 2024-11-20T19:27:58,771 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e8af0e10da4be5fa330b00646bb6e13/C of 6e8af0e10da4be5fa330b00646bb6e13 into d9701cbc30ec48eaa5327c5e43b8caa1(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:58,771 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:58,771 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., storeName=6e8af0e10da4be5fa330b00646bb6e13/C, priority=13, startTime=1732130878683; duration=0sec 2024-11-20T19:27:58,771 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:58,771 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e8af0e10da4be5fa330b00646bb6e13:C 2024-11-20T19:27:58,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:58,882 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e8af0e10da4be5fa330b00646bb6e13 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T19:27:58,882 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=A 2024-11-20T19:27:58,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:58,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=B 2024-11-20T19:27:58,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:58,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO 
DISK 6e8af0e10da4be5fa330b00646bb6e13, store=C 2024-11-20T19:27:58,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:58,892 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207a630def63d84ffd90a124da0f90e997_6e8af0e10da4be5fa330b00646bb6e13 is 50, key is test_row_0/A:col10/1732130878566/Put/seqid=0 2024-11-20T19:27:58,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T19:27:58,899 INFO [Thread-2737 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed 2024-11-20T19:27:58,901 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:58,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-11-20T19:27:58,902 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:58,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T19:27:58,903 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:58,903 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:58,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742522_1698 (size=12454) 2024-11-20T19:27:58,915 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:58,919 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207a630def63d84ffd90a124da0f90e997_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207a630def63d84ffd90a124da0f90e997_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:58,920 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/e98ad0fc91434e7fa93db379a8de06d0, store: [table=TestAcidGuarantees 
family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:58,920 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/e98ad0fc91434e7fa93db379a8de06d0 is 175, key is test_row_0/A:col10/1732130878566/Put/seqid=0 2024-11-20T19:27:58,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:58,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130938922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:58,929 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:58,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130938927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:58,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742523_1699 (size=31255) 2024-11-20T19:27:58,951 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=276, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/e98ad0fc91434e7fa93db379a8de06d0 2024-11-20T19:27:58,959 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/c4389310c8dc4297a497b7938fa6da6a is 50, key is test_row_0/B:col10/1732130878566/Put/seqid=0 2024-11-20T19:27:58,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742524_1700 (size=12301) 2024-11-20T19:27:59,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T19:27:59,029 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:59,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130939028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:59,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:59,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130939030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:59,054 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:59,055 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T19:27:59,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
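[Editor's note] The repeated RegionTooBusyException warnings above are HRegion.checkResources rejecting client Mutate calls while the region's memstore is over its 512.0 K blocking limit. As a hedged illustration only, a minimal writer against this table could look like the sketch below; the table name, row, family, and qualifier mirror the test data in this log, while the retry bound and backoff are arbitrary assumptions (in practice the HBase client already retries RegionTooBusyException internally, so the explicit catch is just to make the backoff visible).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            int attempts = 0;
            while (true) {
                try {
                    // Rejected with RegionTooBusyException while the memstore is over the blocking limit.
                    table.put(put);
                    break;
                } catch (RegionTooBusyException e) {
                    if (++attempts >= 5) {          // arbitrary bound for the sketch
                        throw e;
                    }
                    Thread.sleep(200L * attempts);  // simple linear backoff
                }
            }
        }
    }
}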
2024-11-20T19:27:59,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:59,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:59,055 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:59,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
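[Editor's note] pid=164 fails here because FlushRegionCallable finds the region already mid-flush (the MemStoreFlusher.0 flush that started at 19:27:58,882) and reports "Unable to complete flush" back to the master, which re-dispatches the callable several more times further down in this log. The table-level flush that spawned the procedure can be requested from the Admin API; a minimal sketch, assuming the same table name, is:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Submits a flush for the table; on the master this shows up as the
            // FlushTableProcedure / FlushRegionProcedure pair (pid=163/164 above),
            // dispatched to the region server as FlushRegionCallable.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}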
2024-11-20T19:27:59,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:59,135 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e8af0e10da4be5fa330b00646bb6e13#A#compaction#585 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:59,136 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/b045bcaad6054e6eb76fcff2af637faf is 175, key is test_row_0/A:col10/1732130878548/Put/seqid=0 2024-11-20T19:27:59,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742525_1701 (size=31769) 2024-11-20T19:27:59,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T19:27:59,207 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:59,207 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T19:27:59,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:59,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:59,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:59,207 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
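[Editor's note] For context on the compaction entries in this stretch (ExploringCompactionPolicy selecting 3 eligible files with "16 blocking", and PressureAwareThroughputController reporting against a 50.00 MB/second total limit): the selection figures are driven by per-store compaction settings. A hedged sketch of the relevant configuration keys follows, with values consistent with what the log reports; whether this test overrides them, and how the 50 MB/second throughput bound is configured, is not shown in the log and is left out.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // A minor compaction needs at least hbase.hstore.compaction.min files and
        // takes at most hbase.hstore.compaction.max files per selection.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // The "16 blocking" figure in the selection message corresponds to the
        // store-file count at which writes are blocked until compaction catches up.
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println("compaction.min=" + conf.getInt("hbase.hstore.compaction.min", -1));
    }
}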
2024-11-20T19:27:59,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:59,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:59,232 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:59,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130939229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:59,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:59,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130939233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:59,360 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:59,360 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T19:27:59,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:59,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:59,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:59,360 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
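[Editor's note] The "Over memstore limit=512.0 K" figure that keeps appearing is the per-region blocking size: the memstore flush size multiplied by the block multiplier. One plausible combination that yields 512 K is sketched below (128 KB x 4, the default multiplier); the exact values used by this test run are an assumption, since only the product appears in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Writes are rejected with RegionTooBusyException once the region's memstore
        // reaches flush.size * block.multiplier; 128 KB * 4 = 512 KB matches this log.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 0L);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("blocking limit = " + (flushSize * multiplier) + " bytes"); // 524288
    }
}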
2024-11-20T19:27:59,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:59,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:59,372 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/c4389310c8dc4297a497b7938fa6da6a 2024-11-20T19:27:59,380 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/36da921c16524cc4974b0ac3926c1ee4 is 50, key is test_row_0/C:col10/1732130878566/Put/seqid=0 2024-11-20T19:27:59,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742526_1702 (size=12301) 2024-11-20T19:27:59,423 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/36da921c16524cc4974b0ac3926c1ee4 2024-11-20T19:27:59,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/e98ad0fc91434e7fa93db379a8de06d0 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/e98ad0fc91434e7fa93db379a8de06d0 2024-11-20T19:27:59,434 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/e98ad0fc91434e7fa93db379a8de06d0, entries=150, sequenceid=276, filesize=30.5 K 2024-11-20T19:27:59,435 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/c4389310c8dc4297a497b7938fa6da6a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/c4389310c8dc4297a497b7938fa6da6a 2024-11-20T19:27:59,444 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/c4389310c8dc4297a497b7938fa6da6a, entries=150, sequenceid=276, filesize=12.0 K 2024-11-20T19:27:59,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/36da921c16524cc4974b0ac3926c1ee4 as 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/36da921c16524cc4974b0ac3926c1ee4 2024-11-20T19:27:59,450 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/36da921c16524cc4974b0ac3926c1ee4, entries=150, sequenceid=276, filesize=12.0 K 2024-11-20T19:27:59,450 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 6e8af0e10da4be5fa330b00646bb6e13 in 568ms, sequenceid=276, compaction requested=false 2024-11-20T19:27:59,450 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:59,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T19:27:59,512 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:59,513 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T19:27:59,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:27:59,513 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing 6e8af0e10da4be5fa330b00646bb6e13 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T19:27:59,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=A 2024-11-20T19:27:59,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:59,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=B 2024-11-20T19:27:59,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:59,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=C 2024-11-20T19:27:59,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:59,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112016a70a096bbc41d58116d33ba259a0e3_6e8af0e10da4be5fa330b00646bb6e13 is 50, key is test_row_0/A:col10/1732130878915/Put/seqid=0 2024-11-20T19:27:59,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:59,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:27:59,545 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/b045bcaad6054e6eb76fcff2af637faf as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/b045bcaad6054e6eb76fcff2af637faf 2024-11-20T19:27:59,555 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e8af0e10da4be5fa330b00646bb6e13/A of 6e8af0e10da4be5fa330b00646bb6e13 into b045bcaad6054e6eb76fcff2af637faf(size=31.0 K), total size for store is 61.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:59,555 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:27:59,555 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., storeName=6e8af0e10da4be5fa330b00646bb6e13/A, priority=13, startTime=1732130878683; duration=0sec 2024-11-20T19:27:59,555 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:59,555 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e8af0e10da4be5fa330b00646bb6e13:A 2024-11-20T19:27:59,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:59,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130939563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:59,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742527_1703 (size=12454) 2024-11-20T19:27:59,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:59,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:59,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130939566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:59,572 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112016a70a096bbc41d58116d33ba259a0e3_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112016a70a096bbc41d58116d33ba259a0e3_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:27:59,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/a06c60343e674aa0989d21110de66cf8, store: [table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:27:59,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/a06c60343e674aa0989d21110de66cf8 is 175, key is test_row_0/A:col10/1732130878915/Put/seqid=0 2024-11-20T19:27:59,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742528_1704 (size=31255) 2024-11-20T19:27:59,671 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:59,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130939670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:59,671 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:59,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130939670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:59,774 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T19:27:59,874 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:59,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130939872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:27:59,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:59,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130939873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:00,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T19:28:00,009 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=298, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/a06c60343e674aa0989d21110de66cf8 2024-11-20T19:28:00,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/dbfe97ac6ac841d9adf970015db866b1 is 50, key is test_row_0/B:col10/1732130878915/Put/seqid=0 2024-11-20T19:28:00,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742529_1705 (size=12301) 2024-11-20T19:28:00,178 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:00,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130940177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:00,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:00,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130940178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:00,449 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/dbfe97ac6ac841d9adf970015db866b1 2024-11-20T19:28:00,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/8a3734a512264c8c9c50521738e684a9 is 50, key is test_row_0/C:col10/1732130878915/Put/seqid=0 2024-11-20T19:28:00,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742530_1706 (size=12301) 2024-11-20T19:28:00,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:00,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130940681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:00,684 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:00,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130940683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:00,857 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/8a3734a512264c8c9c50521738e684a9 2024-11-20T19:28:00,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/a06c60343e674aa0989d21110de66cf8 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/a06c60343e674aa0989d21110de66cf8 2024-11-20T19:28:00,862 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/a06c60343e674aa0989d21110de66cf8, entries=150, sequenceid=298, filesize=30.5 K 2024-11-20T19:28:00,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/dbfe97ac6ac841d9adf970015db866b1 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/dbfe97ac6ac841d9adf970015db866b1 2024-11-20T19:28:00,865 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/dbfe97ac6ac841d9adf970015db866b1, entries=150, sequenceid=298, filesize=12.0 K 2024-11-20T19:28:00,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/8a3734a512264c8c9c50521738e684a9 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/8a3734a512264c8c9c50521738e684a9 2024-11-20T19:28:00,868 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/8a3734a512264c8c9c50521738e684a9, entries=150, sequenceid=298, filesize=12.0 K 2024-11-20T19:28:00,868 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 6e8af0e10da4be5fa330b00646bb6e13 in 1355ms, sequenceid=298, compaction requested=true 2024-11-20T19:28:00,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:28:00,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:28:00,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-20T19:28:00,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-11-20T19:28:00,870 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-11-20T19:28:00,870 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9660 sec 2024-11-20T19:28:00,871 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 1.9690 sec 2024-11-20T19:28:01,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T19:28:01,006 INFO [Thread-2737 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-11-20T19:28:01,007 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:28:01,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-11-20T19:28:01,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T19:28:01,008 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=165, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:28:01,009 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:28:01,009 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:28:01,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T19:28:01,159 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:01,160 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-20T19:28:01,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:28:01,160 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing 6e8af0e10da4be5fa330b00646bb6e13 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T19:28:01,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=A 2024-11-20T19:28:01,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:28:01,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=B 2024-11-20T19:28:01,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:28:01,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=C 2024-11-20T19:28:01,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:28:01,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112006bafa37068a4429b1e5bdf09b5b4b24_6e8af0e10da4be5fa330b00646bb6e13 is 50, key is test_row_0/A:col10/1732130879565/Put/seqid=0 2024-11-20T19:28:01,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:36171 is added to blk_1073742531_1707 (size=12454) 2024-11-20T19:28:01,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:28:01,177 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112006bafa37068a4429b1e5bdf09b5b4b24_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112006bafa37068a4429b1e5bdf09b5b4b24_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:01,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/b53ac85351004505b24c2a5c97e5796b, store: [table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:28:01,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/b53ac85351004505b24c2a5c97e5796b is 175, key is test_row_0/A:col10/1732130879565/Put/seqid=0 2024-11-20T19:28:01,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742532_1708 (size=31255) 2024-11-20T19:28:01,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T19:28:01,582 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=313, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/b53ac85351004505b24c2a5c97e5796b 2024-11-20T19:28:01,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/9a967b0c8ab5465a883d698d78afde6a is 50, key is test_row_0/B:col10/1732130879565/Put/seqid=0 2024-11-20T19:28:01,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742533_1709 (size=12301) 2024-11-20T19:28:01,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T19:28:01,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:28:01,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:01,719 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:01,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130941717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:01,721 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:01,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130941719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:01,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:01,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130941820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:01,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:01,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130941822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:01,990 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/9a967b0c8ab5465a883d698d78afde6a 2024-11-20T19:28:01,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/409d9a1e9f2148ac88d3702567906369 is 50, key is test_row_0/C:col10/1732130879565/Put/seqid=0 2024-11-20T19:28:01,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742534_1710 (size=12301) 2024-11-20T19:28:02,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:02,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130942023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:02,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:02,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130942025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:02,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T19:28:02,328 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:02,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130942327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:02,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:02,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130942329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:02,398 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/409d9a1e9f2148ac88d3702567906369 2024-11-20T19:28:02,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/b53ac85351004505b24c2a5c97e5796b as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/b53ac85351004505b24c2a5c97e5796b 2024-11-20T19:28:02,403 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/b53ac85351004505b24c2a5c97e5796b, entries=150, sequenceid=313, filesize=30.5 K 2024-11-20T19:28:02,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/9a967b0c8ab5465a883d698d78afde6a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/9a967b0c8ab5465a883d698d78afde6a 2024-11-20T19:28:02,406 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/9a967b0c8ab5465a883d698d78afde6a, entries=150, sequenceid=313, filesize=12.0 K 2024-11-20T19:28:02,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/409d9a1e9f2148ac88d3702567906369 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/409d9a1e9f2148ac88d3702567906369 2024-11-20T19:28:02,411 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/409d9a1e9f2148ac88d3702567906369, entries=150, sequenceid=313, filesize=12.0 K 2024-11-20T19:28:02,412 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 6e8af0e10da4be5fa330b00646bb6e13 in 1252ms, sequenceid=313, compaction requested=true 2024-11-20T19:28:02,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:28:02,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:28:02,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-20T19:28:02,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-11-20T19:28:02,414 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-11-20T19:28:02,414 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4040 sec 2024-11-20T19:28:02,414 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 1.4070 sec 2024-11-20T19:28:02,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:02,735 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e8af0e10da4be5fa330b00646bb6e13 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T19:28:02,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=A 2024-11-20T19:28:02,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:28:02,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=B 2024-11-20T19:28:02,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:28:02,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
6e8af0e10da4be5fa330b00646bb6e13, store=C 2024-11-20T19:28:02,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:28:02,740 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411206d3e7bbf780c47b3bc1b216c199719f1_6e8af0e10da4be5fa330b00646bb6e13 is 50, key is test_row_0/A:col10/1732130882734/Put/seqid=0 2024-11-20T19:28:02,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742535_1711 (size=14994) 2024-11-20T19:28:02,764 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:02,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46576 deadline: 1732130942761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:02,767 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:02,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46578 deadline: 1732130942763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:02,833 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:02,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130942832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:02,836 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:02,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130942834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:02,867 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:02,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46576 deadline: 1732130942865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:02,870 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:02,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46578 deadline: 1732130942867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:03,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:03,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46576 deadline: 1732130943068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:03,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:03,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46578 deadline: 1732130943072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:03,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T19:28:03,111 INFO [Thread-2737 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-11-20T19:28:03,112 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:28:03,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-11-20T19:28:03,113 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:28:03,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T19:28:03,114 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:28:03,114 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:28:03,143 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:28:03,145 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411206d3e7bbf780c47b3bc1b216c199719f1_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206d3e7bbf780c47b3bc1b216c199719f1_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:03,146 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store 
file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/822f9cc00a8d40119b634a9145529675, store: [table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:28:03,146 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/822f9cc00a8d40119b634a9145529675 is 175, key is test_row_0/A:col10/1732130882734/Put/seqid=0 2024-11-20T19:28:03,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742536_1712 (size=39949) 2024-11-20T19:28:03,160 DEBUG [regionserver/db9c3a6c6492:0.Chore.1 {}] throttle.PressureAwareCompactionThroughputController(103): CompactionPressure is 0.07692307692307693, tune throughput to 53.85 MB/second 2024-11-20T19:28:03,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T19:28:03,265 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:03,265 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T19:28:03,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:28:03,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:28:03,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:28:03,265 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:28:03,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:28:03,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:28:03,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:03,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46576 deadline: 1732130943372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:03,378 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:03,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46578 deadline: 1732130943377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:03,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T19:28:03,417 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:03,417 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T19:28:03,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:28:03,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:28:03,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:28:03,417 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:28:03,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:28:03,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:28:03,549 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=336, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/822f9cc00a8d40119b634a9145529675 2024-11-20T19:28:03,556 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/599e8246b3b7473ca7965cf4530d1372 is 50, key is test_row_0/B:col10/1732130882734/Put/seqid=0 2024-11-20T19:28:03,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742537_1713 (size=12301) 2024-11-20T19:28:03,569 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/599e8246b3b7473ca7965cf4530d1372 2024-11-20T19:28:03,569 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:03,570 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T19:28:03,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:28:03,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:28:03,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:28:03,570 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:28:03,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:28:03,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:28:03,579 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/404c3d7fb02f43079195c8e09077b51b is 50, key is test_row_0/C:col10/1732130882734/Put/seqid=0 2024-11-20T19:28:03,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742538_1714 (size=12301) 2024-11-20T19:28:03,590 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/404c3d7fb02f43079195c8e09077b51b 2024-11-20T19:28:03,595 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/822f9cc00a8d40119b634a9145529675 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/822f9cc00a8d40119b634a9145529675 2024-11-20T19:28:03,600 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/822f9cc00a8d40119b634a9145529675, entries=200, sequenceid=336, filesize=39.0 K 2024-11-20T19:28:03,600 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/599e8246b3b7473ca7965cf4530d1372 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/599e8246b3b7473ca7965cf4530d1372 2024-11-20T19:28:03,603 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/599e8246b3b7473ca7965cf4530d1372, entries=150, sequenceid=336, filesize=12.0 K 2024-11-20T19:28:03,603 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/404c3d7fb02f43079195c8e09077b51b as 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/404c3d7fb02f43079195c8e09077b51b 2024-11-20T19:28:03,606 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/404c3d7fb02f43079195c8e09077b51b, entries=150, sequenceid=336, filesize=12.0 K 2024-11-20T19:28:03,606 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 6e8af0e10da4be5fa330b00646bb6e13 in 871ms, sequenceid=336, compaction requested=true 2024-11-20T19:28:03,607 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:28:03,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e8af0e10da4be5fa330b00646bb6e13:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:28:03,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:28:03,607 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-20T19:28:03,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e8af0e10da4be5fa330b00646bb6e13:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:28:03,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:28:03,607 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-20T19:28:03,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e8af0e10da4be5fa330b00646bb6e13:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:28:03,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:28:03,608 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 165483 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-20T19:28:03,608 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62019 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-20T19:28:03,608 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 6e8af0e10da4be5fa330b00646bb6e13/B is initiating minor compaction (all files) 2024-11-20T19:28:03,608 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1540): 6e8af0e10da4be5fa330b00646bb6e13/A is initiating minor compaction (all files) 2024-11-20T19:28:03,608 INFO 
[RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e8af0e10da4be5fa330b00646bb6e13/B in TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:28:03,608 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e8af0e10da4be5fa330b00646bb6e13/A in TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:28:03,608 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/b045bcaad6054e6eb76fcff2af637faf, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/e98ad0fc91434e7fa93db379a8de06d0, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/a06c60343e674aa0989d21110de66cf8, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/b53ac85351004505b24c2a5c97e5796b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/822f9cc00a8d40119b634a9145529675] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp, totalSize=161.6 K 2024-11-20T19:28:03,608 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/b9ba72fb814e47eeb9c047403e37397b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/c4389310c8dc4297a497b7938fa6da6a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/dbfe97ac6ac841d9adf970015db866b1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/9a967b0c8ab5465a883d698d78afde6a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/599e8246b3b7473ca7965cf4530d1372] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp, totalSize=60.6 K 2024-11-20T19:28:03,609 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=11 throughput controller=DefaultCompactionThroughputController [maxThroughput=53.85 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:28:03,609 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
files: [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/b045bcaad6054e6eb76fcff2af637faf, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/e98ad0fc91434e7fa93db379a8de06d0, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/a06c60343e674aa0989d21110de66cf8, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/b53ac85351004505b24c2a5c97e5796b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/822f9cc00a8d40119b634a9145529675] 2024-11-20T19:28:03,609 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting b9ba72fb814e47eeb9c047403e37397b, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1732130876429 2024-11-20T19:28:03,609 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting c4389310c8dc4297a497b7938fa6da6a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732130878566 2024-11-20T19:28:03,609 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting b045bcaad6054e6eb76fcff2af637faf, keycount=150, bloomtype=ROW, size=31.0 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1732130876429 2024-11-20T19:28:03,610 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting dbfe97ac6ac841d9adf970015db866b1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1732130878911 2024-11-20T19:28:03,610 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting e98ad0fc91434e7fa93db379a8de06d0, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732130878566 2024-11-20T19:28:03,610 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting a06c60343e674aa0989d21110de66cf8, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1732130878911 2024-11-20T19:28:03,610 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 9a967b0c8ab5465a883d698d78afde6a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1732130879540 2024-11-20T19:28:03,610 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting b53ac85351004505b24c2a5c97e5796b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1732130879540 2024-11-20T19:28:03,610 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 599e8246b3b7473ca7965cf4530d1372, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1732130881716 2024-11-20T19:28:03,611 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] compactions.Compactor(224): Compacting 822f9cc00a8d40119b634a9145529675, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=336, 
earliestPutTs=1732130881711 2024-11-20T19:28:03,631 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:28:03,633 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e8af0e10da4be5fa330b00646bb6e13#B#compaction#600 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 53.85 MB/second 2024-11-20T19:28:03,633 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/d9224aa228d7413d8bf324bc8450f591 is 50, key is test_row_0/B:col10/1732130882734/Put/seqid=0 2024-11-20T19:28:03,637 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411206c859d33d3b84db396c1e56afc4eb4a1_6e8af0e10da4be5fa330b00646bb6e13 store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:28:03,640 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411206c859d33d3b84db396c1e56afc4eb4a1_6e8af0e10da4be5fa330b00646bb6e13, store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:28:03,641 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411206c859d33d3b84db396c1e56afc4eb4a1_6e8af0e10da4be5fa330b00646bb6e13 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:28:03,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742540_1716 (size=4469) 2024-11-20T19:28:03,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742539_1715 (size=13085) 2024-11-20T19:28:03,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T19:28:03,722 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:03,722 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41229 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T19:28:03,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
2024-11-20T19:28:03,722 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing 6e8af0e10da4be5fa330b00646bb6e13 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T19:28:03,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=A 2024-11-20T19:28:03,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:28:03,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=B 2024-11-20T19:28:03,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:28:03,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=C 2024-11-20T19:28:03,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:28:03,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120fb61b2c3d7874df3b368aed21f34c5a7_6e8af0e10da4be5fa330b00646bb6e13 is 50, key is test_row_0/A:col10/1732130882762/Put/seqid=0 2024-11-20T19:28:03,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742541_1717 (size=12454) 2024-11-20T19:28:03,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:28:03,740 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120fb61b2c3d7874df3b368aed21f34c5a7_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fb61b2c3d7874df3b368aed21f34c5a7_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:03,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/7fad1f5d3f774ea38047b0b2f2cee18e, store: [table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:28:03,742 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/7fad1f5d3f774ea38047b0b2f2cee18e is 175, key is test_row_0/A:col10/1732130882762/Put/seqid=0 2024-11-20T19:28:03,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742542_1718 (size=31255) 2024-11-20T19:28:03,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:03,843 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. as already flushing 2024-11-20T19:28:03,895 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:03,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130943891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:03,895 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:03,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46578 deadline: 1732130943892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:03,898 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:03,898 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:03,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130943893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:03,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46576 deadline: 1732130943894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:03,999 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:04,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130943997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:04,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:04,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46578 deadline: 1732130943997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:04,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:04,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46576 deadline: 1732130944000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:04,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:04,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130944000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:04,062 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e8af0e10da4be5fa330b00646bb6e13#A#compaction#601 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-20T19:28:04,062 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/29eff580f1f94b70ac08b422fb4dc120 is 175, key is test_row_0/A:col10/1732130882734/Put/seqid=0 2024-11-20T19:28:04,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742543_1719 (size=32039) 2024-11-20T19:28:04,069 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/d9224aa228d7413d8bf324bc8450f591 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/d9224aa228d7413d8bf324bc8450f591 2024-11-20T19:28:04,073 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 6e8af0e10da4be5fa330b00646bb6e13/B of 6e8af0e10da4be5fa330b00646bb6e13 into d9224aa228d7413d8bf324bc8450f591(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:28:04,074 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:28:04,074 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., storeName=6e8af0e10da4be5fa330b00646bb6e13/B, priority=11, startTime=1732130883607; duration=0sec 2024-11-20T19:28:04,074 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:28:04,074 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e8af0e10da4be5fa330b00646bb6e13:B 2024-11-20T19:28:04,074 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-20T19:28:04,076 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62019 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-20T19:28:04,076 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1540): 6e8af0e10da4be5fa330b00646bb6e13/C is initiating minor compaction (all files) 2024-11-20T19:28:04,076 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e8af0e10da4be5fa330b00646bb6e13/C in TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
2024-11-20T19:28:04,076 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/d9701cbc30ec48eaa5327c5e43b8caa1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/36da921c16524cc4974b0ac3926c1ee4, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/8a3734a512264c8c9c50521738e684a9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/409d9a1e9f2148ac88d3702567906369, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/404c3d7fb02f43079195c8e09077b51b] into tmpdir=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp, totalSize=60.6 K 2024-11-20T19:28:04,076 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting d9701cbc30ec48eaa5327c5e43b8caa1, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1732130876429 2024-11-20T19:28:04,077 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 36da921c16524cc4974b0ac3926c1ee4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732130878566 2024-11-20T19:28:04,077 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 8a3734a512264c8c9c50521738e684a9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1732130878911 2024-11-20T19:28:04,077 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 409d9a1e9f2148ac88d3702567906369, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1732130879540 2024-11-20T19:28:04,077 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] compactions.Compactor(224): Compacting 404c3d7fb02f43079195c8e09077b51b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1732130881716 2024-11-20T19:28:04,088 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e8af0e10da4be5fa330b00646bb6e13#C#compaction#603 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-20T19:28:04,089 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/71ab47d8b7a84ec48b13bf37105e53b9 is 50, key is test_row_0/C:col10/1732130882734/Put/seqid=0 2024-11-20T19:28:04,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742544_1720 (size=13085) 2024-11-20T19:28:04,157 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=349, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/7fad1f5d3f774ea38047b0b2f2cee18e 2024-11-20T19:28:04,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/22a57867772e4a638eaf6e117a8275b8 is 50, key is test_row_0/B:col10/1732130882762/Put/seqid=0 2024-11-20T19:28:04,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742545_1721 (size=12301) 2024-11-20T19:28:04,206 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:04,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46578 deadline: 1732130944201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:04,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:04,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130944202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:04,207 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:04,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46576 deadline: 1732130944204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:04,208 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:04,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130944205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:04,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T19:28:04,470 DEBUG [Thread-2744 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11c440f7 to 127.0.0.1:49985 2024-11-20T19:28:04,470 DEBUG [Thread-2744 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:28:04,471 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/29eff580f1f94b70ac08b422fb4dc120 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/29eff580f1f94b70ac08b422fb4dc120 2024-11-20T19:28:04,473 DEBUG [Thread-2746 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x58460ef3 to 127.0.0.1:49985 2024-11-20T19:28:04,473 DEBUG [Thread-2746 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:28:04,475 DEBUG [Thread-2738 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x60d631a3 to 127.0.0.1:49985 2024-11-20T19:28:04,475 DEBUG [Thread-2740 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x58971172 to 127.0.0.1:49985 2024-11-20T19:28:04,475 DEBUG [Thread-2740 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:28:04,475 DEBUG [Thread-2738 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:28:04,475 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 6e8af0e10da4be5fa330b00646bb6e13/A of 6e8af0e10da4be5fa330b00646bb6e13 into 29eff580f1f94b70ac08b422fb4dc120(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:28:04,475 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:28:04,475 INFO [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., storeName=6e8af0e10da4be5fa330b00646bb6e13/A, priority=11, startTime=1732130883607; duration=0sec 2024-11-20T19:28:04,476 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:28:04,476 DEBUG [RS:0;db9c3a6c6492:41229-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e8af0e10da4be5fa330b00646bb6e13:A 2024-11-20T19:28:04,476 DEBUG [Thread-2742 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d7fe93b to 127.0.0.1:49985 2024-11-20T19:28:04,476 DEBUG [Thread-2742 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:28:04,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:04,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46578 deadline: 1732130944507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:04,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:04,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130944508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:04,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:04,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46576 deadline: 1732130944508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:04,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:04,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130944509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:04,524 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/71ab47d8b7a84ec48b13bf37105e53b9 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/71ab47d8b7a84ec48b13bf37105e53b9 2024-11-20T19:28:04,528 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 6e8af0e10da4be5fa330b00646bb6e13/C of 6e8af0e10da4be5fa330b00646bb6e13 into 71ab47d8b7a84ec48b13bf37105e53b9(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:28:04,528 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:28:04,528 INFO [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13., storeName=6e8af0e10da4be5fa330b00646bb6e13/C, priority=11, startTime=1732130883607; duration=0sec 2024-11-20T19:28:04,528 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:28:04,528 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e8af0e10da4be5fa330b00646bb6e13:C 2024-11-20T19:28:04,601 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=349 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/22a57867772e4a638eaf6e117a8275b8 2024-11-20T19:28:04,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/afb7b4ed6129497d9c74538023bdcb0f is 50, key is test_row_0/C:col10/1732130882762/Put/seqid=0 2024-11-20T19:28:04,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742546_1722 (size=12301) 2024-11-20T19:28:05,011 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=349 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/afb7b4ed6129497d9c74538023bdcb0f 2024-11-20T19:28:05,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:05,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:05,011 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:05,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46576 deadline: 1732130945011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:05,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46540 deadline: 1732130945011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:05,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46548 deadline: 1732130945011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:05,012 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:28:05,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46578 deadline: 1732130945011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:05,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/7fad1f5d3f774ea38047b0b2f2cee18e as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/7fad1f5d3f774ea38047b0b2f2cee18e 2024-11-20T19:28:05,023 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/7fad1f5d3f774ea38047b0b2f2cee18e, entries=150, sequenceid=349, filesize=30.5 K 2024-11-20T19:28:05,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/22a57867772e4a638eaf6e117a8275b8 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/22a57867772e4a638eaf6e117a8275b8 2024-11-20T19:28:05,028 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/22a57867772e4a638eaf6e117a8275b8, entries=150, sequenceid=349, filesize=12.0 K 2024-11-20T19:28:05,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/afb7b4ed6129497d9c74538023bdcb0f as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/afb7b4ed6129497d9c74538023bdcb0f 2024-11-20T19:28:05,031 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): 
Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/afb7b4ed6129497d9c74538023bdcb0f, entries=150, sequenceid=349, filesize=12.0 K 2024-11-20T19:28:05,032 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 6e8af0e10da4be5fa330b00646bb6e13 in 1310ms, sequenceid=349, compaction requested=false 2024-11-20T19:28:05,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:28:05,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:28:05,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-11-20T19:28:05,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-11-20T19:28:05,034 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-11-20T19:28:05,034 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9190 sec 2024-11-20T19:28:05,035 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 1.9220 sec 2024-11-20T19:28:05,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T19:28:05,217 INFO [Thread-2737 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-11-20T19:28:05,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41229 {}] regionserver.HRegion(8581): Flush requested on 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:05,938 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e8af0e10da4be5fa330b00646bb6e13 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T19:28:05,939 DEBUG [Thread-2733 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5c9b5141 to 127.0.0.1:49985 2024-11-20T19:28:05,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=A 2024-11-20T19:28:05,939 DEBUG [Thread-2733 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:28:05,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:28:05,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=B 2024-11-20T19:28:05,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:28:05,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): 
FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=C 2024-11-20T19:28:05,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:28:05,944 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112052cb774883fa4bb6aec0e31398be915f_6e8af0e10da4be5fa330b00646bb6e13 is 50, key is test_row_0/A:col10/1732130883891/Put/seqid=0 2024-11-20T19:28:05,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742547_1723 (size=12454) 2024-11-20T19:28:06,017 DEBUG [Thread-2735 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11a52cdf to 127.0.0.1:49985 2024-11-20T19:28:06,017 DEBUG [Thread-2731 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0644b7e6 to 127.0.0.1:49985 2024-11-20T19:28:06,017 DEBUG [Thread-2735 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:28:06,017 DEBUG [Thread-2731 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:28:06,021 DEBUG [Thread-2727 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2cbfd84f to 127.0.0.1:49985 2024-11-20T19:28:06,021 DEBUG [Thread-2729 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3fb684eb to 127.0.0.1:49985 2024-11-20T19:28:06,021 DEBUG [Thread-2729 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:28:06,021 DEBUG [Thread-2727 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:28:06,021 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-20T19:28:06,021 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 13 2024-11-20T19:28:06,021 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 129 2024-11-20T19:28:06,021 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 121 2024-11-20T19:28:06,021 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 32 2024-11-20T19:28:06,021 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 3 2024-11-20T19:28:06,021 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T19:28:06,021 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5586 2024-11-20T19:28:06,021 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5596 2024-11-20T19:28:06,021 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5455 2024-11-20T19:28:06,021 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5533 2024-11-20T19:28:06,021 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5620 2024-11-20T19:28:06,021 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T19:28:06,021 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T19:28:06,021 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x635b1751 to 127.0.0.1:49985 2024-11-20T19:28:06,021 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:28:06,022 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T19:28:06,022 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$13(2755): 
Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T19:28:06,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T19:28:06,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T19:28:06,024 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130886024"}]},"ts":"1732130886024"} 2024-11-20T19:28:06,025 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T19:28:06,048 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T19:28:06,049 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T19:28:06,050 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=171, ppid=170, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6e8af0e10da4be5fa330b00646bb6e13, UNASSIGN}] 2024-11-20T19:28:06,050 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=171, ppid=170, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6e8af0e10da4be5fa330b00646bb6e13, UNASSIGN 2024-11-20T19:28:06,051 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=171 updating hbase:meta row=6e8af0e10da4be5fa330b00646bb6e13, regionState=CLOSING, regionLocation=db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:06,052 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T19:28:06,052 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; CloseRegionProcedure 6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496}] 2024-11-20T19:28:06,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T19:28:06,203 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:06,204 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(124): Close 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:06,204 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T19:28:06,204 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1681): Closing 6e8af0e10da4be5fa330b00646bb6e13, disabling compactions & flushes 2024-11-20T19:28:06,204 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region 
TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:28:06,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T19:28:06,347 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:28:06,350 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112052cb774883fa4bb6aec0e31398be915f_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112052cb774883fa4bb6aec0e31398be915f_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:06,351 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/0993a67cf03d4c778eb209e2e7ad0794, store: [table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:28:06,351 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/0993a67cf03d4c778eb209e2e7ad0794 is 175, key is test_row_0/A:col10/1732130883891/Put/seqid=0 2024-11-20T19:28:06,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742548_1724 (size=31255) 2024-11-20T19:28:06,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T19:28:06,755 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=376, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/0993a67cf03d4c778eb209e2e7ad0794 2024-11-20T19:28:06,761 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/cab519206d1c41239da9317ccbf9b8bb is 50, key is test_row_0/B:col10/1732130883891/Put/seqid=0 2024-11-20T19:28:06,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742549_1725 (size=12301) 2024-11-20T19:28:07,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T19:28:07,167 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=376 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/cab519206d1c41239da9317ccbf9b8bb 2024-11-20T19:28:07,174 
DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/a5c0c02c3e7749dc8e3fbae331e0c4bc is 50, key is test_row_0/C:col10/1732130883891/Put/seqid=0 2024-11-20T19:28:07,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742550_1726 (size=12301) 2024-11-20T19:28:07,578 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=376 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/a5c0c02c3e7749dc8e3fbae331e0c4bc 2024-11-20T19:28:07,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/0993a67cf03d4c778eb209e2e7ad0794 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/0993a67cf03d4c778eb209e2e7ad0794 2024-11-20T19:28:07,584 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/0993a67cf03d4c778eb209e2e7ad0794, entries=150, sequenceid=376, filesize=30.5 K 2024-11-20T19:28:07,584 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/cab519206d1c41239da9317ccbf9b8bb as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/cab519206d1c41239da9317ccbf9b8bb 2024-11-20T19:28:07,587 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/cab519206d1c41239da9317ccbf9b8bb, entries=150, sequenceid=376, filesize=12.0 K 2024-11-20T19:28:07,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/a5c0c02c3e7749dc8e3fbae331e0c4bc as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/a5c0c02c3e7749dc8e3fbae331e0c4bc 2024-11-20T19:28:07,591 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/a5c0c02c3e7749dc8e3fbae331e0c4bc, entries=150, sequenceid=376, filesize=12.0 K 2024-11-20T19:28:07,591 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=26.84 KB/27480 for 6e8af0e10da4be5fa330b00646bb6e13 in 1653ms, sequenceid=376, compaction requested=true 
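
The entries above trace a complete memstore flush for region 6e8af0e10da4be5fa330b00646bb6e13: each column family (A, B, C) is first written to a file under the region's .tmp directory, and that file is then committed by renaming it into the family directory before the flush is reported as finished ("Committing .../.tmp/A/<file> as .../A/<file>", then "Added ..."). Below is a minimal Java sketch of that write-to-tmp-then-rename commit step, using the public Hadoop FileSystem API rather than HBase's internal HRegionFileSystem; the class name, helper method, and abbreviated paths are illustrative assumptions, not code from this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;

// Illustrative sketch only (not HBase internals): commit a flushed store file
// by renaming it from the region's .tmp area into the column-family directory,
// mirroring the "Committing .../.tmp/A/<file> as .../A/<file>" entries above.
public final class TmpCommitSketch {

  /** Moves tmpFile into familyDir and returns the committed path. */
  public static Path commitStoreFile(FileSystem fs, Path tmpFile, Path familyDir)
      throws IOException {
    Path committed = new Path(familyDir, tmpFile.getName());
    // On HDFS a plain rename is a single namespace operation, so readers see
    // either the old set of store files or the new one, never a partial file.
    if (!fs.rename(tmpFile, committed)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + committed);
    }
    return committed;
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    // Paths abbreviated from the log above (the hdfs://localhost:40371/user/jenkins/test-data/... prefix is dropped).
    Path tmp = new Path("/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/0993a67cf03d4c778eb209e2e7ad0794");
    Path familyDir = new Path("/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A");
    commitStoreFile(fs, tmp, familyDir);
  }
}

The point of the pattern, as far as these log lines show it, is that a crash in the middle of a flush leaves at most an orphaned file under .tmp rather than a half-written file in the family directory; only the final rename makes the new HFile visible.
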
2024-11-20T19:28:07,591 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:28:07,591 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:28:07,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e8af0e10da4be5fa330b00646bb6e13:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:28:07,591 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:28:07,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:28:07,591 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. after waiting 0 ms 2024-11-20T19:28:07,591 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. because compaction request was cancelled 2024-11-20T19:28:07,591 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 2024-11-20T19:28:07,592 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e8af0e10da4be5fa330b00646bb6e13:A 2024-11-20T19:28:07,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e8af0e10da4be5fa330b00646bb6e13:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:28:07,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:28:07,592 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. because compaction request was cancelled 2024-11-20T19:28:07,592 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e8af0e10da4be5fa330b00646bb6e13:B 2024-11-20T19:28:07,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e8af0e10da4be5fa330b00646bb6e13:C, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:28:07,592 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
because compaction request was cancelled 2024-11-20T19:28:07,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:28:07,592 DEBUG [RS:0;db9c3a6c6492:41229-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e8af0e10da4be5fa330b00646bb6e13:C 2024-11-20T19:28:07,592 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(2837): Flushing 6e8af0e10da4be5fa330b00646bb6e13 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-20T19:28:07,592 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=A 2024-11-20T19:28:07,592 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:28:07,592 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=B 2024-11-20T19:28:07,592 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:28:07,592 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e8af0e10da4be5fa330b00646bb6e13, store=C 2024-11-20T19:28:07,592 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:28:07,596 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c54b4009876147f691affc0aab6b7af2_6e8af0e10da4be5fa330b00646bb6e13 is 50, key is test_row_0/A:col10/1732130886016/Put/seqid=0 2024-11-20T19:28:07,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742551_1727 (size=9914) 2024-11-20T19:28:07,600 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:28:07,603 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c54b4009876147f691affc0aab6b7af2_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c54b4009876147f691affc0aab6b7af2_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:07,604 DEBUG 
[RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/0858da7792a14d4493fa2503fff32a8b, store: [table=TestAcidGuarantees family=A region=6e8af0e10da4be5fa330b00646bb6e13] 2024-11-20T19:28:07,604 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/0858da7792a14d4493fa2503fff32a8b is 175, key is test_row_0/A:col10/1732130886016/Put/seqid=0 2024-11-20T19:28:07,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742552_1728 (size=22561) 2024-11-20T19:28:08,007 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=383, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/0858da7792a14d4493fa2503fff32a8b 2024-11-20T19:28:08,013 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/6591a2bd4c104da69a3a29668d0cd424 is 50, key is test_row_0/B:col10/1732130886016/Put/seqid=0 2024-11-20T19:28:08,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742553_1729 (size=9857) 2024-11-20T19:28:08,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T19:28:08,418 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/6591a2bd4c104da69a3a29668d0cd424 2024-11-20T19:28:08,427 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/f9062ba6e0434586b1fb4856c04d2cb4 is 50, key is test_row_0/C:col10/1732130886016/Put/seqid=0 2024-11-20T19:28:08,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742554_1730 (size=9857) 2024-11-20T19:28:08,830 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=383 (bloomFilter=true), 
to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/f9062ba6e0434586b1fb4856c04d2cb4 2024-11-20T19:28:08,833 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/A/0858da7792a14d4493fa2503fff32a8b as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/0858da7792a14d4493fa2503fff32a8b 2024-11-20T19:28:08,837 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/0858da7792a14d4493fa2503fff32a8b, entries=100, sequenceid=383, filesize=22.0 K 2024-11-20T19:28:08,837 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/B/6591a2bd4c104da69a3a29668d0cd424 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/6591a2bd4c104da69a3a29668d0cd424 2024-11-20T19:28:08,840 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/6591a2bd4c104da69a3a29668d0cd424, entries=100, sequenceid=383, filesize=9.6 K 2024-11-20T19:28:08,840 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/.tmp/C/f9062ba6e0434586b1fb4856c04d2cb4 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/f9062ba6e0434586b1fb4856c04d2cb4 2024-11-20T19:28:08,843 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/f9062ba6e0434586b1fb4856c04d2cb4, entries=100, sequenceid=383, filesize=9.6 K 2024-11-20T19:28:08,843 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 6e8af0e10da4be5fa330b00646bb6e13 in 1251ms, sequenceid=383, compaction requested=true 2024-11-20T19:28:08,844 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/089425f42ffc466eb41795981ab7ae0c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/1fa601a545a349de9442bb1a1a58be6a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/5d543024ff4c48aba035308a93ee8d41, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/6d8f921f66a944c7bc5b5056aad92129, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/b2eca50e75ac4f68a07cb18ff2427e9c, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/a762e17f2e5141e4a9f2f3bad0582262, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/7c438ec58f1d45db81cd6e0b8ec37ec9, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/480464d76a61432c8c83fb3054cfcb28, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/a8131681dfc94ab7933305a1f796d6aa, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/2d4e41ddb0cd49e48f89e93cb5c323e7, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/7a375fd84107458f86ff91288aad9653, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/9a96133d8aee465a9c9ae152dc4bb6ce, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/8c669a1925094fa894feada7107d529a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/86c830189fcc4405a72241aa5502e709, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/1f9606fa65b54aa39b0f0cd17b8531dd, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/d0f333c99ef04db38bab15ba5cb94ac5, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/16abe1552461490eae63a90b460ffc89, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/7ef1f2ab67f84ebc95df26cbb9631625, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/b045bcaad6054e6eb76fcff2af637faf, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/e98ad0fc91434e7fa93db379a8de06d0, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/a06c60343e674aa0989d21110de66cf8, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/b53ac85351004505b24c2a5c97e5796b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/822f9cc00a8d40119b634a9145529675] to archive 2024-11-20T19:28:08,845 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T19:28:08,847 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/089425f42ffc466eb41795981ab7ae0c to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/089425f42ffc466eb41795981ab7ae0c 2024-11-20T19:28:08,848 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/1fa601a545a349de9442bb1a1a58be6a to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/1fa601a545a349de9442bb1a1a58be6a 2024-11-20T19:28:08,849 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/5d543024ff4c48aba035308a93ee8d41 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/5d543024ff4c48aba035308a93ee8d41 2024-11-20T19:28:08,849 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/6d8f921f66a944c7bc5b5056aad92129 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/6d8f921f66a944c7bc5b5056aad92129 2024-11-20T19:28:08,850 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/b2eca50e75ac4f68a07cb18ff2427e9c to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/b2eca50e75ac4f68a07cb18ff2427e9c 2024-11-20T19:28:08,851 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/a762e17f2e5141e4a9f2f3bad0582262 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/a762e17f2e5141e4a9f2f3bad0582262 2024-11-20T19:28:08,852 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/7c438ec58f1d45db81cd6e0b8ec37ec9 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/7c438ec58f1d45db81cd6e0b8ec37ec9 2024-11-20T19:28:08,853 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/480464d76a61432c8c83fb3054cfcb28 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/480464d76a61432c8c83fb3054cfcb28 2024-11-20T19:28:08,854 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/a8131681dfc94ab7933305a1f796d6aa to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/a8131681dfc94ab7933305a1f796d6aa 2024-11-20T19:28:08,855 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/2d4e41ddb0cd49e48f89e93cb5c323e7 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/2d4e41ddb0cd49e48f89e93cb5c323e7 2024-11-20T19:28:08,856 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/7a375fd84107458f86ff91288aad9653 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/7a375fd84107458f86ff91288aad9653 2024-11-20T19:28:08,856 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/9a96133d8aee465a9c9ae152dc4bb6ce to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/9a96133d8aee465a9c9ae152dc4bb6ce 2024-11-20T19:28:08,857 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/8c669a1925094fa894feada7107d529a to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/8c669a1925094fa894feada7107d529a 2024-11-20T19:28:08,858 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/86c830189fcc4405a72241aa5502e709 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/86c830189fcc4405a72241aa5502e709 2024-11-20T19:28:08,858 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/1f9606fa65b54aa39b0f0cd17b8531dd to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/1f9606fa65b54aa39b0f0cd17b8531dd 2024-11-20T19:28:08,859 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/d0f333c99ef04db38bab15ba5cb94ac5 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/d0f333c99ef04db38bab15ba5cb94ac5 2024-11-20T19:28:08,860 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/16abe1552461490eae63a90b460ffc89 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/16abe1552461490eae63a90b460ffc89 2024-11-20T19:28:08,861 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/7ef1f2ab67f84ebc95df26cbb9631625 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/7ef1f2ab67f84ebc95df26cbb9631625 2024-11-20T19:28:08,862 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/b045bcaad6054e6eb76fcff2af637faf to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/b045bcaad6054e6eb76fcff2af637faf 2024-11-20T19:28:08,863 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/e98ad0fc91434e7fa93db379a8de06d0 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/e98ad0fc91434e7fa93db379a8de06d0 2024-11-20T19:28:08,864 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/a06c60343e674aa0989d21110de66cf8 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/a06c60343e674aa0989d21110de66cf8 2024-11-20T19:28:08,864 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/b53ac85351004505b24c2a5c97e5796b to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/b53ac85351004505b24c2a5c97e5796b 2024-11-20T19:28:08,865 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/822f9cc00a8d40119b634a9145529675 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/822f9cc00a8d40119b634a9145529675 2024-11-20T19:28:08,866 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/da531366f7124c6387ed2c666ae17a85, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/a978658b0801416599a2a3c872ae1e98, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/5d2835ef50d44143ada55d2db8d82f8f, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/eb57aef32ced4c04b0ad5a8871dcc628, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/06ed4fa72a244e58b800266fc04f42f2, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/dd787e852c704a5c8083fa1673ed5067, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/fcf4d62229f44b87b349a33f923f6f2a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/5c8aab5a0fdc485aac66bb758825addc, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/42284fc65d4f43db8ec006a3eee60f5a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/91f6d4bbb3f74cf5b9893f37efe55066, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/d622df7f3a634641bf5ec26914b0b947, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/f98017f55b1b4293b08b39a21e331170, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/43dda5364741472db4e2fcf31f1bde31, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/c51d9fdd1f924c74a5beeeabc0acd493, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/ecc4d7decd2a413fbfa796c6706fd229, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/9c84778d60eb491183d3fea262742723, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/cfd7349fd1af4b43b8a1e9000464af7e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/b9ba72fb814e47eeb9c047403e37397b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/d3b8fd936da44060945d8b457785b64b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/c4389310c8dc4297a497b7938fa6da6a, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/dbfe97ac6ac841d9adf970015db866b1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/9a967b0c8ab5465a883d698d78afde6a, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/599e8246b3b7473ca7965cf4530d1372] to archive 2024-11-20T19:28:08,866 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T19:28:08,867 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/da531366f7124c6387ed2c666ae17a85 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/da531366f7124c6387ed2c666ae17a85 2024-11-20T19:28:08,868 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/a978658b0801416599a2a3c872ae1e98 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/a978658b0801416599a2a3c872ae1e98 2024-11-20T19:28:08,869 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/5d2835ef50d44143ada55d2db8d82f8f to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/5d2835ef50d44143ada55d2db8d82f8f 2024-11-20T19:28:08,870 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/eb57aef32ced4c04b0ad5a8871dcc628 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/eb57aef32ced4c04b0ad5a8871dcc628 2024-11-20T19:28:08,870 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/06ed4fa72a244e58b800266fc04f42f2 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/06ed4fa72a244e58b800266fc04f42f2 2024-11-20T19:28:08,871 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/dd787e852c704a5c8083fa1673ed5067 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/dd787e852c704a5c8083fa1673ed5067 2024-11-20T19:28:08,871 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/fcf4d62229f44b87b349a33f923f6f2a to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/fcf4d62229f44b87b349a33f923f6f2a 2024-11-20T19:28:08,872 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/5c8aab5a0fdc485aac66bb758825addc to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/5c8aab5a0fdc485aac66bb758825addc 2024-11-20T19:28:08,872 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/42284fc65d4f43db8ec006a3eee60f5a to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/42284fc65d4f43db8ec006a3eee60f5a 2024-11-20T19:28:08,873 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/91f6d4bbb3f74cf5b9893f37efe55066 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/91f6d4bbb3f74cf5b9893f37efe55066 2024-11-20T19:28:08,873 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/d622df7f3a634641bf5ec26914b0b947 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/d622df7f3a634641bf5ec26914b0b947 2024-11-20T19:28:08,874 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/f98017f55b1b4293b08b39a21e331170 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/f98017f55b1b4293b08b39a21e331170 2024-11-20T19:28:08,874 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/43dda5364741472db4e2fcf31f1bde31 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/43dda5364741472db4e2fcf31f1bde31 2024-11-20T19:28:08,875 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/c51d9fdd1f924c74a5beeeabc0acd493 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/c51d9fdd1f924c74a5beeeabc0acd493 2024-11-20T19:28:08,876 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/ecc4d7decd2a413fbfa796c6706fd229 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/ecc4d7decd2a413fbfa796c6706fd229 2024-11-20T19:28:08,877 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/9c84778d60eb491183d3fea262742723 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/9c84778d60eb491183d3fea262742723 2024-11-20T19:28:08,877 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/cfd7349fd1af4b43b8a1e9000464af7e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/cfd7349fd1af4b43b8a1e9000464af7e 2024-11-20T19:28:08,878 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/b9ba72fb814e47eeb9c047403e37397b to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/b9ba72fb814e47eeb9c047403e37397b 2024-11-20T19:28:08,879 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/d3b8fd936da44060945d8b457785b64b to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/d3b8fd936da44060945d8b457785b64b 2024-11-20T19:28:08,880 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/c4389310c8dc4297a497b7938fa6da6a to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/c4389310c8dc4297a497b7938fa6da6a 2024-11-20T19:28:08,881 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/dbfe97ac6ac841d9adf970015db866b1 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/dbfe97ac6ac841d9adf970015db866b1 2024-11-20T19:28:08,881 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/9a967b0c8ab5465a883d698d78afde6a to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/9a967b0c8ab5465a883d698d78afde6a 2024-11-20T19:28:08,882 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/599e8246b3b7473ca7965cf4530d1372 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/599e8246b3b7473ca7965cf4530d1372 2024-11-20T19:28:08,883 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/d9534d62116c4a08912fc5a13c3975f7, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/f25343e315e242f89c25265b00c01e2b, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/4b8fa00c3203446fb578ac47547f9d78, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/924ae7ab8ca044f9ae88f95f134bf66b, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/e01b70dd4e0b4d8483db9c987b17d92e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/51a86d1639f44a2ab63afc5f959d4f17, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/3383f5e125fc47f5ad092f4bc14bbfde, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/ace5f45ea3524023bf6b4700365837ad, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/2f0f1659cff24b15b964d5a9b85f7f5e, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/a2272a705a4f469bb87e8059dbc2c5cd, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/14386f78fe2d48dab49a5791a7bcd878, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/d0dc383a7d634bdbb0222076b0ba3667, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/378cf7cf73f04edfa29a3b79111c7368, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/90ba572a77ac4ee7a689e5197a1857e1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/fc364298852346ef9ea16dfb8c9c96ae, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/1042e92fc7d743e0998d8d9b0f2fbadf, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/5a133eb583f547d488f8dae44875c372, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/d9701cbc30ec48eaa5327c5e43b8caa1, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/a189aa6b8af04dd9a69fb3c859485705, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/36da921c16524cc4974b0ac3926c1ee4, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/8a3734a512264c8c9c50521738e684a9, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/409d9a1e9f2148ac88d3702567906369, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/404c3d7fb02f43079195c8e09077b51b] to archive 2024-11-20T19:28:08,884 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T19:28:08,885 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/d9534d62116c4a08912fc5a13c3975f7 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/d9534d62116c4a08912fc5a13c3975f7 2024-11-20T19:28:08,886 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/f25343e315e242f89c25265b00c01e2b to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/f25343e315e242f89c25265b00c01e2b 2024-11-20T19:28:08,887 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/4b8fa00c3203446fb578ac47547f9d78 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/4b8fa00c3203446fb578ac47547f9d78 2024-11-20T19:28:08,889 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/924ae7ab8ca044f9ae88f95f134bf66b to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/924ae7ab8ca044f9ae88f95f134bf66b 2024-11-20T19:28:08,890 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/e01b70dd4e0b4d8483db9c987b17d92e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/e01b70dd4e0b4d8483db9c987b17d92e 2024-11-20T19:28:08,891 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/51a86d1639f44a2ab63afc5f959d4f17 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/51a86d1639f44a2ab63afc5f959d4f17 2024-11-20T19:28:08,892 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/3383f5e125fc47f5ad092f4bc14bbfde to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/3383f5e125fc47f5ad092f4bc14bbfde 2024-11-20T19:28:08,893 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/ace5f45ea3524023bf6b4700365837ad to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/ace5f45ea3524023bf6b4700365837ad 2024-11-20T19:28:08,894 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/2f0f1659cff24b15b964d5a9b85f7f5e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/2f0f1659cff24b15b964d5a9b85f7f5e 2024-11-20T19:28:08,895 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/a2272a705a4f469bb87e8059dbc2c5cd to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/a2272a705a4f469bb87e8059dbc2c5cd 2024-11-20T19:28:08,896 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/14386f78fe2d48dab49a5791a7bcd878 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/14386f78fe2d48dab49a5791a7bcd878 2024-11-20T19:28:08,897 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/d0dc383a7d634bdbb0222076b0ba3667 to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/d0dc383a7d634bdbb0222076b0ba3667 2024-11-20T19:28:08,897 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/378cf7cf73f04edfa29a3b79111c7368 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/378cf7cf73f04edfa29a3b79111c7368 2024-11-20T19:28:08,898 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/90ba572a77ac4ee7a689e5197a1857e1 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/90ba572a77ac4ee7a689e5197a1857e1 2024-11-20T19:28:08,899 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/fc364298852346ef9ea16dfb8c9c96ae to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/fc364298852346ef9ea16dfb8c9c96ae 2024-11-20T19:28:08,899 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/1042e92fc7d743e0998d8d9b0f2fbadf to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/1042e92fc7d743e0998d8d9b0f2fbadf 2024-11-20T19:28:08,900 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/5a133eb583f547d488f8dae44875c372 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/5a133eb583f547d488f8dae44875c372 2024-11-20T19:28:08,900 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/d9701cbc30ec48eaa5327c5e43b8caa1 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/d9701cbc30ec48eaa5327c5e43b8caa1 2024-11-20T19:28:08,901 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/a189aa6b8af04dd9a69fb3c859485705 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/a189aa6b8af04dd9a69fb3c859485705 2024-11-20T19:28:08,902 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/36da921c16524cc4974b0ac3926c1ee4 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/36da921c16524cc4974b0ac3926c1ee4 2024-11-20T19:28:08,902 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/8a3734a512264c8c9c50521738e684a9 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/8a3734a512264c8c9c50521738e684a9 2024-11-20T19:28:08,903 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/409d9a1e9f2148ac88d3702567906369 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/409d9a1e9f2148ac88d3702567906369 2024-11-20T19:28:08,903 DEBUG [StoreCloser-TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/404c3d7fb02f43079195c8e09077b51b to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/404c3d7fb02f43079195c8e09077b51b 2024-11-20T19:28:08,906 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/recovered.edits/386.seqid, newMaxSeqId=386, maxSeqId=4 2024-11-20T19:28:08,907 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13. 
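The StoreCloser entries above show each compacted store file being moved out of the region's data directory and into the mirrored path under archive/. Below is a minimal sketch of that move using the plain Hadoop FileSystem API rather than HBase's actual backup.HFileArchiver; the class name, helper signature, and example relative path are illustrative assumptions only.

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class ArchiveSketch {
      // Illustrative only: mirrors the data/... -> archive/data/... move seen in the log.
      static void archiveStoreFile(FileSystem fs, Path rootDir, String relativeFile) throws IOException {
        Path source = new Path(rootDir, new Path("data", relativeFile));
        Path target = new Path(rootDir, new Path("archive/data", relativeFile));
        fs.mkdirs(target.getParent());      // ensure archive/<ns>/<table>/<region>/<family> exists
        if (!fs.rename(source, target)) {   // within one HDFS namespace this is a metadata-only move
          throw new IOException("Failed to archive " + source + " to " + target);
        }
      }
    }

A caller would pass something like default/TestAcidGuarantees/<region>/C/<hfile> as the relative path, matching the source and archive paths logged above.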
2024-11-20T19:28:08,907 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1635): Region close journal for 6e8af0e10da4be5fa330b00646bb6e13: 2024-11-20T19:28:08,908 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(170): Closed 6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:08,908 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=171 updating hbase:meta row=6e8af0e10da4be5fa330b00646bb6e13, regionState=CLOSED 2024-11-20T19:28:08,910 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-11-20T19:28:08,910 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; CloseRegionProcedure 6e8af0e10da4be5fa330b00646bb6e13, server=db9c3a6c6492,41229,1732130701496 in 2.8570 sec 2024-11-20T19:28:08,911 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=171, resume processing ppid=170 2024-11-20T19:28:08,911 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, ppid=170, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6e8af0e10da4be5fa330b00646bb6e13, UNASSIGN in 2.8600 sec 2024-11-20T19:28:08,912 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-11-20T19:28:08,912 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.8620 sec 2024-11-20T19:28:08,913 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130888913"}]},"ts":"1732130888913"} 2024-11-20T19:28:08,914 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T19:28:08,923 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T19:28:08,924 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.9010 sec 2024-11-20T19:28:10,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T19:28:10,128 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-11-20T19:28:10,129 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T19:28:10,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:28:10,130 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=173, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:28:10,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T19:28:10,131 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=173, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:28:10,133 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:10,135 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A, FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B, FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C, FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/recovered.edits] 2024-11-20T19:28:10,137 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/0858da7792a14d4493fa2503fff32a8b to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/0858da7792a14d4493fa2503fff32a8b 2024-11-20T19:28:10,138 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/0993a67cf03d4c778eb209e2e7ad0794 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/0993a67cf03d4c778eb209e2e7ad0794 2024-11-20T19:28:10,139 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/29eff580f1f94b70ac08b422fb4dc120 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/29eff580f1f94b70ac08b422fb4dc120 2024-11-20T19:28:10,140 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/7fad1f5d3f774ea38047b0b2f2cee18e to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/A/7fad1f5d3f774ea38047b0b2f2cee18e 2024-11-20T19:28:10,143 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/22a57867772e4a638eaf6e117a8275b8 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/22a57867772e4a638eaf6e117a8275b8 
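On the client side, the DISABLE (procId 169) and DELETE (procId 173) operations driving this archiving correspond to the standard Admin calls. A minimal sketch follows, assuming an hbase-site.xml on the classpath that points at the cluster; the class name is hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class DropTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();      // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          if (admin.tableExists(table)) {
            admin.disableTable(table);  // DisableTableProcedure, as in pid=169 above
            admin.deleteTable(table);   // DeleteTableProcedure, which archives the region directories
          }
        }
      }
    }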
2024-11-20T19:28:10,144 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/6591a2bd4c104da69a3a29668d0cd424 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/6591a2bd4c104da69a3a29668d0cd424 2024-11-20T19:28:10,145 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/cab519206d1c41239da9317ccbf9b8bb to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/cab519206d1c41239da9317ccbf9b8bb 2024-11-20T19:28:10,147 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/d9224aa228d7413d8bf324bc8450f591 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/B/d9224aa228d7413d8bf324bc8450f591 2024-11-20T19:28:10,149 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/71ab47d8b7a84ec48b13bf37105e53b9 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/71ab47d8b7a84ec48b13bf37105e53b9 2024-11-20T19:28:10,151 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/a5c0c02c3e7749dc8e3fbae331e0c4bc to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/a5c0c02c3e7749dc8e3fbae331e0c4bc 2024-11-20T19:28:10,152 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/afb7b4ed6129497d9c74538023bdcb0f to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/afb7b4ed6129497d9c74538023bdcb0f 2024-11-20T19:28:10,153 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/f9062ba6e0434586b1fb4856c04d2cb4 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/C/f9062ba6e0434586b1fb4856c04d2cb4 2024-11-20T19:28:10,157 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/recovered.edits/386.seqid to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13/recovered.edits/386.seqid 2024-11-20T19:28:10,157 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/default/TestAcidGuarantees/6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:10,157 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T19:28:10,158 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T19:28:10,159 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-20T19:28:10,163 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112003d9a919582c4834ba95309a5d83a55a_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112003d9a919582c4834ba95309a5d83a55a_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:10,165 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112006bafa37068a4429b1e5bdf09b5b4b24_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112006bafa37068a4429b1e5bdf09b5b4b24_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:10,167 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200f4504b0f3b848e893700933a8ddbee3_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200f4504b0f3b848e893700933a8ddbee3_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:10,168 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112016a70a096bbc41d58116d33ba259a0e3_6e8af0e10da4be5fa330b00646bb6e13 to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112016a70a096bbc41d58116d33ba259a0e3_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:10,170 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204e1f42986691492895f5bc9962cf5656_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204e1f42986691492895f5bc9962cf5656_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:10,172 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112052cb774883fa4bb6aec0e31398be915f_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112052cb774883fa4bb6aec0e31398be915f_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:10,174 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205837e1f1343f4683bd929f5e55a977a0_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205837e1f1343f4683bd929f5e55a977a0_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:10,175 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206d3e7bbf780c47b3bc1b216c199719f1_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206d3e7bbf780c47b3bc1b216c199719f1_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:10,177 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207a630def63d84ffd90a124da0f90e997_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207a630def63d84ffd90a124da0f90e997_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:10,179 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207e3dacfb832742aca1d3b87cbaf67f02_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207e3dacfb832742aca1d3b87cbaf67f02_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:10,181 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411208273c5151cd842bab2979a674c57f1a9_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411208273c5151cd842bab2979a674c57f1a9_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:10,182 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112082bba7336f704f23a8b07d0f8723c71e_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112082bba7336f704f23a8b07d0f8723c71e_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:10,184 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112092284b95c29c469799d5750aac58417c_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112092284b95c29c469799d5750aac58417c_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:10,186 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112096a7a79c44374cb6b2411c3dfeb60cd3_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112096a7a79c44374cb6b2411c3dfeb60cd3_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:10,188 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b910d53b227349f78949dc8255e6f18f_6e8af0e10da4be5fa330b00646bb6e13 to 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b910d53b227349f78949dc8255e6f18f_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:10,189 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c54b4009876147f691affc0aab6b7af2_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c54b4009876147f691affc0aab6b7af2_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:10,191 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120da4372a9df884d788a9b62e415150ff4_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120da4372a9df884d788a9b62e415150ff4_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:10,191 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f8c695e342fd4f53a53356a3fb1d6e29_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f8c695e342fd4f53a53356a3fb1d6e29_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:10,192 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fb61b2c3d7874df3b368aed21f34c5a7_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fb61b2c3d7874df3b368aed21f34c5a7_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:10,193 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fb6b914864f841389092aa95a7bd47c0_6e8af0e10da4be5fa330b00646bb6e13 to hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fb6b914864f841389092aa95a7bd47c0_6e8af0e10da4be5fa330b00646bb6e13 2024-11-20T19:28:10,193 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T19:28:10,194 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=173, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:28:10,196 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T19:28:10,197 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T19:28:10,198 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=173, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:28:10,198 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T19:28:10,198 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732130890198"}]},"ts":"9223372036854775807"} 2024-11-20T19:28:10,199 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T19:28:10,199 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 6e8af0e10da4be5fa330b00646bb6e13, NAME => 'TestAcidGuarantees,,1732130862261.6e8af0e10da4be5fa330b00646bb6e13.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T19:28:10,199 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T19:28:10,199 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732130890199"}]},"ts":"9223372036854775807"} 2024-11-20T19:28:10,201 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T19:28:10,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T19:28:10,240 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=173, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:28:10,240 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 111 msec 2024-11-20T19:28:10,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T19:28:10,433 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-11-20T19:28:10,443 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=238 (was 237) - Thread LEAK? -, OpenFileDescriptor=449 (was 447) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=669 (was 665) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4138 (was 3198) - AvailableMemoryMB LEAK? 
- 2024-11-20T19:28:10,443 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-20T19:28:10,443 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T19:28:10,443 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x38630296 to 127.0.0.1:49985 2024-11-20T19:28:10,443 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:28:10,443 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T19:28:10,443 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1328901242, stopped=false 2024-11-20T19:28:10,444 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=db9c3a6c6492,46833,1732130700613 2024-11-20T19:28:10,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T19:28:10,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41229-0x1015afe9cb30001, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T19:28:10,448 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-20T19:28:10,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:28:10,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41229-0x1015afe9cb30001, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:28:10,449 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:28:10,449 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T19:28:10,449 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'db9c3a6c6492,41229,1732130701496' ***** 2024-11-20T19:28:10,449 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-20T19:28:10,449 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41229-0x1015afe9cb30001, quorum=127.0.0.1:49985, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T19:28:10,449 INFO [RS:0;db9c3a6c6492:41229 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T19:28:10,449 INFO [RS:0;db9c3a6c6492:41229 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T19:28:10,449 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-20T19:28:10,449 INFO [RS:0;db9c3a6c6492:41229 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
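The shutdown sequence that begins here is the teardown half of the single-node test cluster the suite runs against. A minimal sketch of that lifecycle, assuming the HBaseTestingUtility used by these tests:

    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public final class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        util.startMiniCluster();        // local ZooKeeper, mini-DFS and one region server
        try {
          // ... run test logic against util.getConnection() ...
        } finally {
          util.shutdownMiniCluster();   // emits the "Shutting down minicluster" sequence seen above
        }
      }
    }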
2024-11-20T19:28:10,450 INFO [RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer(3579): Received CLOSE for 26eb6e9aec5a60a946cc3400b187b0a4 2024-11-20T19:28:10,450 INFO [RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer(1224): stopping server db9c3a6c6492,41229,1732130701496 2024-11-20T19:28:10,450 DEBUG [RS:0;db9c3a6c6492:41229 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:28:10,450 INFO [RS:0;db9c3a6c6492:41229 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T19:28:10,450 INFO [RS:0;db9c3a6c6492:41229 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T19:28:10,450 INFO [RS:0;db9c3a6c6492:41229 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-20T19:28:10,450 INFO [RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-20T19:28:10,450 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 26eb6e9aec5a60a946cc3400b187b0a4, disabling compactions & flushes 2024-11-20T19:28:10,450 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732130704559.26eb6e9aec5a60a946cc3400b187b0a4. 2024-11-20T19:28:10,450 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732130704559.26eb6e9aec5a60a946cc3400b187b0a4. 2024-11-20T19:28:10,450 INFO [RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-20T19:28:10,450 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732130704559.26eb6e9aec5a60a946cc3400b187b0a4. after waiting 0 ms 2024-11-20T19:28:10,450 DEBUG [RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer(1603): Online Regions={26eb6e9aec5a60a946cc3400b187b0a4=hbase:namespace,,1732130704559.26eb6e9aec5a60a946cc3400b187b0a4., 1588230740=hbase:meta,,1.1588230740} 2024-11-20T19:28:10,450 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732130704559.26eb6e9aec5a60a946cc3400b187b0a4. 
2024-11-20T19:28:10,451 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 26eb6e9aec5a60a946cc3400b187b0a4 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-20T19:28:10,451 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-20T19:28:10,451 INFO [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-20T19:28:10,451 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-20T19:28:10,451 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T19:28:10,451 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T19:28:10,451 INFO [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-11-20T19:28:10,453 DEBUG [RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 26eb6e9aec5a60a946cc3400b187b0a4 2024-11-20T19:28:10,467 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/namespace/26eb6e9aec5a60a946cc3400b187b0a4/.tmp/info/5bb1421e55b247fdb209a3a5a7b17d1f is 45, key is default/info:d/1732130706025/Put/seqid=0 2024-11-20T19:28:10,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742555_1731 (size=5037) 2024-11-20T19:28:10,472 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/meta/1588230740/.tmp/info/140755241e2745b69710f4bf14d1f949 is 143, key is hbase:namespace,,1732130704559.26eb6e9aec5a60a946cc3400b187b0a4./info:regioninfo/1732130705872/Put/seqid=0 2024-11-20T19:28:10,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742556_1732 (size=7725) 2024-11-20T19:28:10,546 INFO [regionserver/db9c3a6c6492:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T19:28:10,654 DEBUG [RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 26eb6e9aec5a60a946cc3400b187b0a4 2024-11-20T19:28:10,854 DEBUG [RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 26eb6e9aec5a60a946cc3400b187b0a4 2024-11-20T19:28:10,870 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/namespace/26eb6e9aec5a60a946cc3400b187b0a4/.tmp/info/5bb1421e55b247fdb209a3a5a7b17d1f 
2024-11-20T19:28:10,874 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/namespace/26eb6e9aec5a60a946cc3400b187b0a4/.tmp/info/5bb1421e55b247fdb209a3a5a7b17d1f as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/namespace/26eb6e9aec5a60a946cc3400b187b0a4/info/5bb1421e55b247fdb209a3a5a7b17d1f 2024-11-20T19:28:10,875 INFO [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/meta/1588230740/.tmp/info/140755241e2745b69710f4bf14d1f949 2024-11-20T19:28:10,877 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/namespace/26eb6e9aec5a60a946cc3400b187b0a4/info/5bb1421e55b247fdb209a3a5a7b17d1f, entries=2, sequenceid=6, filesize=4.9 K 2024-11-20T19:28:10,878 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 26eb6e9aec5a60a946cc3400b187b0a4 in 427ms, sequenceid=6, compaction requested=false 2024-11-20T19:28:10,882 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/namespace/26eb6e9aec5a60a946cc3400b187b0a4/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-20T19:28:10,883 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1732130704559.26eb6e9aec5a60a946cc3400b187b0a4. 2024-11-20T19:28:10,883 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 26eb6e9aec5a60a946cc3400b187b0a4: 2024-11-20T19:28:10,883 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1732130704559.26eb6e9aec5a60a946cc3400b187b0a4. 
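The namespace-region flush just above follows the usual write-then-commit pattern: the store flusher writes the new file under the region's .tmp directory, and the region filesystem then renames it into the column-family directory, so readers never observe a partially written file. Below is a minimal sketch of that pattern with the plain Hadoop FileSystem API; the file name and payload are hypothetical, and real flushes write HFile-formatted blocks rather than raw bytes.

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class FlushCommitSketch {
      static Path writeAndCommit(FileSystem fs, Path regionDir, String family, String fileName,
          byte[] payload) throws IOException {
        Path tmpFile = new Path(new Path(regionDir, ".tmp"), new Path(family, fileName));
        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
          out.write(payload);                     // placeholder for the real HFile writer output
        }
        Path committed = new Path(new Path(regionDir, family), fileName);
        if (!fs.rename(tmpFile, committed)) {     // the file becomes visible in the store atomically
          throw new IOException("Commit failed for " + tmpFile);
        }
        return committed;
      }
    }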
2024-11-20T19:28:10,897 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/meta/1588230740/.tmp/rep_barrier/1062afdc4cad480ba0ef1b192a9b3db1 is 102, key is TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45./rep_barrier:/1732130737046/DeleteFamily/seqid=0
2024-11-20T19:28:10,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742557_1733 (size=6025)
2024-11-20T19:28:11,054 DEBUG [RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer(1629): Waiting on 1588230740
2024-11-20T19:28:11,163 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace
2024-11-20T19:28:11,163 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees
2024-11-20T19:28:11,232 INFO [regionserver/db9c3a6c6492:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-11-20T19:28:11,232 INFO [regionserver/db9c3a6c6492:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-11-20T19:28:11,255 DEBUG [RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer(1629): Waiting on 1588230740
2024-11-20T19:28:11,300 INFO [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/meta/1588230740/.tmp/rep_barrier/1062afdc4cad480ba0ef1b192a9b3db1
2024-11-20T19:28:11,327 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/meta/1588230740/.tmp/table/3571327421444a0bb413b457362b18a0 is 96, key is TestAcidGuarantees,,1732130706232.6fb4967d0e6203ca72c498496394ce45./table:/1732130737046/DeleteFamily/seqid=0
2024-11-20T19:28:11,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742558_1734 (size=5942)
2024-11-20T19:28:11,455 INFO [RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close
2024-11-20T19:28:11,455 DEBUG [RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740}
2024-11-20T19:28:11,455 DEBUG [RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer(1629): Waiting on 1588230740
2024-11-20T19:28:11,655 DEBUG [RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer(1629): Waiting on 1588230740
2024-11-20T19:28:11,731 INFO [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/meta/1588230740/.tmp/table/3571327421444a0bb413b457362b18a0
2024-11-20T19:28:11,734 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/meta/1588230740/.tmp/info/140755241e2745b69710f4bf14d1f949 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/meta/1588230740/info/140755241e2745b69710f4bf14d1f949
2024-11-20T19:28:11,737 INFO [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/meta/1588230740/info/140755241e2745b69710f4bf14d1f949, entries=22, sequenceid=93, filesize=7.5 K
2024-11-20T19:28:11,738 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/meta/1588230740/.tmp/rep_barrier/1062afdc4cad480ba0ef1b192a9b3db1 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/meta/1588230740/rep_barrier/1062afdc4cad480ba0ef1b192a9b3db1
2024-11-20T19:28:11,742 INFO [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/meta/1588230740/rep_barrier/1062afdc4cad480ba0ef1b192a9b3db1, entries=6, sequenceid=93, filesize=5.9 K
2024-11-20T19:28:11,743 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/meta/1588230740/.tmp/table/3571327421444a0bb413b457362b18a0 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/meta/1588230740/table/3571327421444a0bb413b457362b18a0
2024-11-20T19:28:11,747 INFO [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/meta/1588230740/table/3571327421444a0bb413b457362b18a0, entries=9, sequenceid=93, filesize=5.8 K
2024-11-20T19:28:11,748 INFO [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1297ms, sequenceid=93, compaction requested=false
2024-11-20T19:28:11,757 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1
2024-11-20T19:28:11,757 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-20T19:28:11,757 INFO [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-11-20T19:28:11,757 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-11-20T19:28:11,757 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-11-20T19:28:11,855 INFO [RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer(1250): stopping server db9c3a6c6492,41229,1732130701496; all regions closed.
2024-11-20T19:28:11,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741834_1010 (size=26050)
2024-11-20T19:28:11,861 DEBUG [RS:0;db9c3a6c6492:41229 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/oldWALs
2024-11-20T19:28:11,861 INFO [RS:0;db9c3a6c6492:41229 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL db9c3a6c6492%2C41229%2C1732130701496.meta:.meta(num 1732130704233)
2024-11-20T19:28:11,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741832_1008 (size=16945409)
2024-11-20T19:28:11,865 DEBUG [RS:0;db9c3a6c6492:41229 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/oldWALs
2024-11-20T19:28:11,865 INFO [RS:0;db9c3a6c6492:41229 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL db9c3a6c6492%2C41229%2C1732130701496:(num 1732130703336)
2024-11-20T19:28:11,865 DEBUG [RS:0;db9c3a6c6492:41229 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T19:28:11,865 INFO [RS:0;db9c3a6c6492:41229 {}] regionserver.LeaseManager(133): Closed leases
2024-11-20T19:28:11,865 INFO [RS:0;db9c3a6c6492:41229 {}] hbase.ChoreService(370): Chore service for: regionserver/db9c3a6c6492:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-11-20T19:28:11,866 INFO [regionserver/db9c3a6c6492:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-11-20T19:28:11,866 INFO [RS:0;db9c3a6c6492:41229 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41229
2024-11-20T19:28:11,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41229-0x1015afe9cb30001, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/db9c3a6c6492,41229,1732130701496
2024-11-20T19:28:11,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-20T19:28:11,873 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher.
java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$359/0x00007f9b7c8f2188@14e9af4c rejected from java.util.concurrent.ThreadPoolExecutor@51eb33b5[Shutting down, pool size = 1, active threads = 0, queued tasks = 0, completed tasks = 15]
    at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1360) ~[?:?]
    at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?]
    at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4]
    at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4]
2024-11-20T19:28:11,873 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [db9c3a6c6492,41229,1732130701496]
2024-11-20T19:28:11,874 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing db9c3a6c6492,41229,1732130701496; numProcessing=1
2024-11-20T19:28:11,889 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/db9c3a6c6492,41229,1732130701496 already deleted, retry=false
2024-11-20T19:28:11,889 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; db9c3a6c6492,41229,1732130701496 expired; onlineServers=0
2024-11-20T19:28:11,890 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'db9c3a6c6492,46833,1732130700613' *****
2024-11-20T19:28:11,890 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-20T19:28:11,890 DEBUG [M:0;db9c3a6c6492:46833 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69f992f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db9c3a6c6492/172.17.0.2:0
2024-11-20T19:28:11,890 INFO [M:0;db9c3a6c6492:46833 {}] regionserver.HRegionServer(1224): stopping server db9c3a6c6492,46833,1732130700613
2024-11-20T19:28:11,890 INFO [M:0;db9c3a6c6492:46833 {}] regionserver.HRegionServer(1250): stopping server db9c3a6c6492,46833,1732130700613; all regions closed.
2024-11-20T19:28:11,890 DEBUG [M:0;db9c3a6c6492:46833 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T19:28:11,890 DEBUG [M:0;db9c3a6c6492:46833 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-20T19:28:11,891 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
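
[Editor's note] The RejectedExecutionException logged by ClientCnxn$EventThread above arises because HBase's ZKWatcher hands ZooKeeper events off to an internal executor, and during shutdown that executor is already terminated when the last NodeDeleted/NodeChildrenChanged events arrive. A self-contained sketch of that failure mode, using only the plain ZooKeeper Watcher interface and java.util.concurrent (not HBase's ZKWatcher implementation):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.RejectedExecutionException;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;

    public class AsyncWatcherSketch implements Watcher {
      // Single-threaded executor standing in for the event-dispatch pool seen in the log.
      private final ExecutorService eventExecutor = Executors.newSingleThreadExecutor();

      @Override
      public void process(WatchedEvent event) {
        try {
          eventExecutor.execute(() -> System.out.println("Handling " + event));
        } catch (RejectedExecutionException e) {
          // Events delivered after close() land here; if the exception escaped process(),
          // ZooKeeper's EventThread would log it as "Error while calling watcher".
          System.err.println("Dropped late event after shutdown: " + event);
        }
      }

      public void close() {
        eventExecutor.shutdown(); // from this point on, execute() rejects new tasks
      }
    }
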
2024-11-20T19:28:11,891 DEBUG [M:0;db9c3a6c6492:46833 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-20T19:28:11,891 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster-HFileCleaner.small.0-1732130702961 {}] cleaner.HFileCleaner(306): Exit Thread[master/db9c3a6c6492:0:becomeActiveMaster-HFileCleaner.small.0-1732130702961,5,FailOnTimeoutGroup]
2024-11-20T19:28:11,891 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster-HFileCleaner.large.0-1732130702960 {}] cleaner.HFileCleaner(306): Exit Thread[master/db9c3a6c6492:0:becomeActiveMaster-HFileCleaner.large.0-1732130702960,5,FailOnTimeoutGroup]
2024-11-20T19:28:11,891 INFO [M:0;db9c3a6c6492:46833 {}] hbase.ChoreService(370): Chore service for: master/db9c3a6c6492:0 had [] on shutdown
2024-11-20T19:28:11,891 DEBUG [M:0;db9c3a6c6492:46833 {}] master.HMaster(1733): Stopping service threads
2024-11-20T19:28:11,892 INFO [M:0;db9c3a6c6492:46833 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-20T19:28:11,892 ERROR [M:0;db9c3a6c6492:46833 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT
java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10]
    Thread[HFileArchiver-5,5,PEWorkerGroup]
    Thread[IPC Client (1308410445) connection to localhost/127.0.0.1:40371 from jenkins,5,PEWorkerGroup]
    Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:40371,5,PEWorkerGroup]
    Thread[HFileArchiver-6,5,PEWorkerGroup]
2024-11-20T19:28:11,893 INFO [M:0;db9c3a6c6492:46833 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-20T19:28:11,893 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-20T19:28:11,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-20T19:28:11,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T19:28:11,898 DEBUG [M:0;db9c3a6c6492:46833 {}] zookeeper.ZKUtil(347): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-20T19:28:11,898 WARN [M:0;db9c3a6c6492:46833 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-20T19:28:11,898 INFO [M:0;db9c3a6c6492:46833 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-11-20T19:28:11,899 INFO [M:0;db9c3a6c6492:46833 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-20T19:28:11,899 DEBUG [M:0;db9c3a6c6492:46833 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-20T19:28:11,899 INFO [M:0;db9c3a6c6492:46833 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
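
[Editor's note] The ProcedureExecutor error above is a diagnostic dump of threads still alive in the PEWorkerGroup thread group at shutdown. A rough sketch of how such a listing can be produced with the standard java.lang.ThreadGroup API (illustrative only; not the ProcedureExecutor code itself):

    public class ThreadGroupDumpSketch {
      public static void main(String[] args) {
        // Stand-in for the PEWorkerGroup referenced in the log.
        ThreadGroup group = Thread.currentThread().getThreadGroup();

        // activeCount() is only an estimate, so leave head-room before enumerating.
        Thread[] threads = new Thread[group.activeCount() + 8];
        int n = group.enumerate(threads);

        System.out.println(group); // prints java.lang.ThreadGroup[name=...,maxpri=...]
        for (int i = 0; i < n; i++) {
          System.out.println("    " + threads[i]); // prints Thread[name,priority,group]
        }
      }
    }
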
2024-11-20T19:28:11,899 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-20T19:28:11,899 DEBUG [M:0;db9c3a6c6492:46833 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T19:28:11,899 DEBUG [M:0;db9c3a6c6492:46833 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-20T19:28:11,899 DEBUG [M:0;db9c3a6c6492:46833 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T19:28:11,899 INFO [M:0;db9c3a6c6492:46833 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=804.08 KB heapSize=991.95 KB
2024-11-20T19:28:11,921 DEBUG [M:0;db9c3a6c6492:46833 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d8df0bb45ef34f648a0379d3d5bd347a is 82, key is hbase:meta,,1/info:regioninfo/1732130704357/Put/seqid=0
2024-11-20T19:28:11,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742559_1735 (size=5672)
2024-11-20T19:28:11,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41229-0x1015afe9cb30001, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T19:28:11,981 INFO [RS:0;db9c3a6c6492:41229 {}] regionserver.HRegionServer(1307): Exiting; stopping=db9c3a6c6492,41229,1732130701496; zookeeper connection closed.
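
[Editor's note] The "Set watcher on znode that does not yet exist, /hbase/master" line above corresponds to registering an existence watch on a znode that has just been deleted. A minimal sketch with the plain ZooKeeper client (not HBase's ZKUtil); the quorum address and session timeout are placeholders:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class MasterZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("Event " + event.getType() + " on " + event.getPath());

        // Placeholder connect string; the test run used its own embedded quorum.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30000, watcher);

        // exists() returns null for an absent znode but still registers the watch,
        // so a later NodeCreated/NodeDeleted on /hbase/master will fire the watcher.
        Stat stat = zk.exists("/hbase/master", watcher);
        System.out.println(stat == null
            ? "/hbase/master does not exist yet; watch registered"
            : "/hbase/master exists, version=" + stat.getVersion());

        zk.close();
      }
    }
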
2024-11-20T19:28:11,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41229-0x1015afe9cb30001, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T19:28:11,981 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@773b180b {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@773b180b
2024-11-20T19:28:11,982 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-11-20T19:28:12,325 INFO [M:0;db9c3a6c6492:46833 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2334 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d8df0bb45ef34f648a0379d3d5bd347a
2024-11-20T19:28:12,349 DEBUG [M:0;db9c3a6c6492:46833 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b8d263c9473f4222be4fdad34ae95b66 is 2278, key is \x00\x00\x00\x00\x00\x00\x00\x96/proc:d/1732130864326/Put/seqid=0
2024-11-20T19:28:12,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742560_1736 (size=43958)
2024-11-20T19:28:12,758 INFO [M:0;db9c3a6c6492:46833 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=803.53 KB at sequenceid=2334 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b8d263c9473f4222be4fdad34ae95b66
2024-11-20T19:28:12,762 INFO [M:0;db9c3a6c6492:46833 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b8d263c9473f4222be4fdad34ae95b66
2024-11-20T19:28:12,777 DEBUG [M:0;db9c3a6c6492:46833 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/43f25fc06c464a74916204a1a9474437 is 69, key is db9c3a6c6492,41229,1732130701496/rs:state/1732130703091/Put/seqid=0
2024-11-20T19:28:12,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073742561_1737 (size=5156)
2024-11-20T19:28:13,181 INFO [M:0;db9c3a6c6492:46833 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2334 (bloomFilter=true), to=hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/43f25fc06c464a74916204a1a9474437
2024-11-20T19:28:13,193 DEBUG [M:0;db9c3a6c6492:46833 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d8df0bb45ef34f648a0379d3d5bd347a as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d8df0bb45ef34f648a0379d3d5bd347a
2024-11-20T19:28:13,197 INFO [M:0;db9c3a6c6492:46833 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d8df0bb45ef34f648a0379d3d5bd347a, entries=8, sequenceid=2334, filesize=5.5 K
2024-11-20T19:28:13,198 DEBUG [M:0;db9c3a6c6492:46833 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b8d263c9473f4222be4fdad34ae95b66 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b8d263c9473f4222be4fdad34ae95b66
2024-11-20T19:28:13,202 INFO [M:0;db9c3a6c6492:46833 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b8d263c9473f4222be4fdad34ae95b66
2024-11-20T19:28:13,202 INFO [M:0;db9c3a6c6492:46833 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b8d263c9473f4222be4fdad34ae95b66, entries=173, sequenceid=2334, filesize=42.9 K
2024-11-20T19:28:13,203 DEBUG [M:0;db9c3a6c6492:46833 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/43f25fc06c464a74916204a1a9474437 as hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/43f25fc06c464a74916204a1a9474437
2024-11-20T19:28:13,207 INFO [M:0;db9c3a6c6492:46833 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40371/user/jenkins/test-data/613a2e2b-60d9-60a2-535d-8b46ef575203/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/43f25fc06c464a74916204a1a9474437, entries=1, sequenceid=2334, filesize=5.0 K
2024-11-20T19:28:13,208 INFO [M:0;db9c3a6c6492:46833 {}] regionserver.HRegion(3040): Finished flush of dataSize ~804.08 KB/823381, heapSize ~991.66 KB/1015456, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1309ms, sequenceid=2334, compaction requested=false
2024-11-20T19:28:13,209 INFO [M:0;db9c3a6c6492:46833 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T19:28:13,209 DEBUG [M:0;db9c3a6c6492:46833 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-11-20T19:28:13,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36171 is added to blk_1073741830_1006 (size=976306)
2024-11-20T19:28:13,212 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-11-20T19:28:13,212 INFO [M:0;db9c3a6c6492:46833 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
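
[Editor's note] The flushes above (Flushing -> Flushed memstore -> Committing -> Added -> Finished flush) are issued internally by the region server and master during shutdown, but the same client-visible flush sequence can be requested explicitly through the Admin API. A minimal sketch, assuming an hbase-site.xml on the classpath and using the table name from this test run purely as a placeholder:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        // Picks up hbase-site.xml / hbase-default.xml from the classpath.
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Requests a memstore flush of every region of the table.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
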
2024-11-20T19:28:13,212 INFO [M:0;db9c3a6c6492:46833 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:46833
2024-11-20T19:28:13,222 DEBUG [M:0;db9c3a6c6492:46833 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/db9c3a6c6492,46833,1732130700613 already deleted, retry=false
2024-11-20T19:28:13,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T19:28:13,331 INFO [M:0;db9c3a6c6492:46833 {}] regionserver.HRegionServer(1307): Exiting; stopping=db9c3a6c6492,46833,1732130700613; zookeeper connection closed.
2024-11-20T19:28:13,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46833-0x1015afe9cb30000, quorum=127.0.0.1:49985, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T19:28:13,335 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@10ba49e9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T19:28:13,337 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@661c2e9c{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T19:28:13,338 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T19:28:13,338 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ca71a25{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T19:28:13,338 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@134e7cc5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/hadoop.log.dir/,STOPPED}
2024-11-20T19:28:13,341 WARN [BP-1052772000-172.17.0.2-1732130696748 heartbeating to localhost/127.0.0.1:40371 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-20T19:28:13,341 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-20T19:28:13,341 WARN [BP-1052772000-172.17.0.2-1732130696748 heartbeating to localhost/127.0.0.1:40371 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1052772000-172.17.0.2-1732130696748 (Datanode Uuid 62e061be-1081-4234-973e-e9d14a5f767e) service to localhost/127.0.0.1:40371
2024-11-20T19:28:13,341 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-20T19:28:13,344 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/cluster_db2e18e3-a485-48e5-5213-a6c18f012833/dfs/data/data1/current/BP-1052772000-172.17.0.2-1732130696748 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T19:28:13,345 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/cluster_db2e18e3-a485-48e5-5213-a6c18f012833/dfs/data/data2/current/BP-1052772000-172.17.0.2-1732130696748 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T19:28:13,345 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-20T19:28:13,352 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-20T19:28:13,353 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T19:28:13,353 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T19:28:13,353 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T19:28:13,353 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/752ccc5e-597a-78e1-f535-f12fe88f5b72/hadoop.log.dir/,STOPPED}
2024-11-20T19:28:13,372 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-11-20T19:28:13,518 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
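
[Editor's note] The final lines are the test harness tearing down the embedded minicluster (HBase master/regionserver, then the Jetty web UIs, DataNode, NameNode, and MiniZK). An illustrative sketch of the lifecycle whose teardown produces the "Shutdown MiniZK cluster" and "Minicluster is down" lines, assuming the HBase test artifacts are on the classpath; the table and family names are placeholders:

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MiniClusterLifecycleSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        util.startMiniCluster(); // embedded ZK + HDFS + HBase master and regionserver
        try {
          Table table = util.createTable(TableName.valueOf("TestAcidGuarantees"),
              Bytes.toBytes("A")); // placeholder column family
          // ... test body would go here ...
          table.close();
        } finally {
          util.shutdownMiniCluster(); // HBaseTestingUtility logs "Minicluster is down" when done
        }
      }
    }
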